diff --git "a/2191.jsonl" "b/2191.jsonl" new file mode 100644--- /dev/null +++ "b/2191.jsonl" @@ -0,0 +1,642 @@ +{"seq_id":"569711684","text":"import socket, sys, time\n\n\n\nclass Communicator():\n\n def __init__(self): \n \n self.clientSockets = []\n self.serverConnections = []\n self.sockets = []\n self.portNumber = 1031\n self.timeout = 10\n self.delimiter = '~'\n \n self.verbose = True\n\n self.localip = self.getOwnIp()\n self.playerIpAddresses = self.inputExternalIps() #note will ask for input\n\n \"\"\"\n Uses a socket connection to google to get\n local machine's ip address\n \"\"\"\n def getOwnIp(self):\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n try:\n s.connect((\"google.com\", 80))\n except:\n print(\"You may be having internet connection problems\")\n s.close()\n self.closeConnections()\n sys.exit()\n\n ip = s.getsockname()[0]\n \n s.close()\n \n print(\"Your IP address is \" + ip)\n return ip\n \n\n \"\"\"\n Asks user to input other IP addresses and does\n a basic check to see if they are valid.\n \"\"\"\n def inputExternalIps(self):\n ips = []\n userInput = ''\n\n print(\"Please enter other user's IP addresses\")\n print(\"Enter q or quit to stop.\")\n\n while True:\n \n userInput = input(\"Address: \").strip(' ')\n\n if userInput.lower() == 'q' or userInput.lower() == 'quit':\n break\n \n\n if self.testIfIp(userInput):\n ips.append(userInput)\n else:\n print(\"Invalid IP address. Try again.\")\n \n ips.append(self.localip)\n\n ips.sort()\n\n input(\"Press enter when all players have finished entering IP addresses\")\n\n return ips \n\n\n \"\"\"\n checks to see if the ip entered looks like an ip address\n Note that regex would be good\n \"\"\"\n def testIfIp(self, strToCheck):\n \n listOfNums = strToCheck.split('.')\n \n if not len(listOfNums) == 4:\n return False\n\n for val in listOfNums:\n \n if len(val) > 3 or not val.isdigit():\n return False\n\n return True\n\n def getLocalIp(self):\n return self.localip\n\n\n def getAllIps(self):\n return self.playerIpAddresses\n\n \"\"\"\n Connects to all other IP addresses\n \"\"\"\n def connectToIps(self):\n \n for ip in self.playerIpAddresses:\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\n if ip == self.localip:\n self.createServerConnections()\n connectionMade = True\n\n else:\n\n if self.verbose:\n print(\"Attempting to make client connection.\")\n\n connectionMade = False\n\n portNum = self.getPortNumber()\n\n # Gives a little extra time. \n for placeholder in range(self.timeout):\n\n #this is sorta bad code. Should probably find a way to impove.\n try:\n if self.verbose:\n print(\"Attempting a connection to server\")\n sock.connect((ip, portNum))\n self.clientSockets.append(sock)\n connectionMade = True\n break\n except:\n if self.verbose:\n print(\"Connection failed. 
Sleeping then trying again.\")\n time.sleep(1)\n\n if not connectionMade:\n print(\"Problem connecting to other players\")\n self.closeConnections()\n sys.exit()\n\n \n \n\n print(\"Successfully connected to other players.\")\n\n\n def createServerConnections(self):\n\n if self.verbose:\n print(\"Attempting to create server side connection.\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.sockets.append(sock)\n\n host = '' #figure out what this means\n port = self.getPortNumber()\n\n try:\n if self.verbose:\n print(\"Attempting bind\")\n sock.bind((host, port))\n except:\n print(\"Bind failed.\")\n self.closeConnections()\n sys.exit()\n\n for placeHolder in range(len(self.playerIpAddresses)-1):\n\n if self.verbose:\n print(\"Listening...\")\n\n sock.listen(len(self.playerIpAddresses)-1)\n\n conn, addr = sock.accept()\n self.serverConnections.append(conn)\n\n\n print(\"Server side connections successful.\")\n\n \n\n def getPortNumber(self):\n val = self.portNumber\n self.portNumber += 1\n print(\"Port Number:\", val)\n return val\n\n def sendData(self, data):\n\n data = str(data) + self.delimiter\n\n for sock in self.clientSockets:\n try:\n sock.sendall(data.encode('ascii', 'replace'))\n except Exception as e:\n if self.verbose:\n print(\"Problem sending message\")\n print(str(e))\n\n\n # to check that everyone agrees with the connections made\n def recieveDataConfirmation(self, numBytes=1024):\n allData = []\n\n for connection in self.serverConnections:\n \n currentPacket = ''\n currentChar = ''\n counter = 0\n\n while not currentChar == self.delimiter:\n currentPacket += currentChar\n counter += 1\n currentChar = connection.recv(1).decode('ascii')\n if counter >= 100:\n print(\"Too many characters in confirmation!!!\")\n self.closeConnections()\n sys.exit()\n\n\n allData.append(currentPacket)\n\n return allData\n\n # for use of communicating coordinates \n def recieveDataGame(self):\n\n allData = []\n\n for connection in self.serverConnections:\n \n currentPacket = ''\n currentChar = ''\n counter = 0 \n\n while not currentChar == self.delimiter:\n currentPacket += currentChar\n counter += 1\n currentChar = connection.recv(1).decode('ascii')\n if counter >= 100:\n print(\"Too many characters in game!!!\")\n self.closeConnections()\n sys.exit() \n\n\n allData.append(currentPacket)\n \n \n for data in allData:\n data = data.split(' ')\n\n data[1] = int(data[1])\n data[2] = int(data[2])\n data[3] = int(data[3])\n\n returnData = {}\n\n for data in allData:\n dataList = data.split(' ')\n returnData[dataList[0]] = (dataList[1], dataList[2], dataList[3])\n\n return returnData\n \n \n def closeConnections(self):\n\n print(\"Closing client connections\")\n for x in self.clientSockets:\n try:\n x.close()\n except:\n pass\n\n print(\"Closing server connections\")\n for x in self.serverConnections:\n try:\n x.close()\n except:\n pass\n\n print(\"Closing sockets\")\n for x in self.sockets:\n try:\n x.close()\n except:\n pass\n\n\n def confirmConnections(self):\n \n if self.verbose:\n print(\"Confirming connections made to other players.\")\n\n if self.verbose:\n print(\"Sending data to check equality\")\n \n self.sendData(self.playerIpAddresses)\n\n if self.verbose:\n print(\"Recieving data from other players\")\n \n data = self.recieveDataConfirmation()\n\n connectionConfirmed = True\n\n if not len(data) == len(self.playerIpAddresses)-1:\n connectionConfirmed = False\n\n\n for chunk in data:\n if not chunk == str(self.playerIpAddresses):\n connectionConfirmed = False\n\n if 
connectionConfirmed:\n self.sendData(\"go\")\n else:\n self.sendData(\"error\")\n\n\n if self.verbose:\n print(\"Recieving confirmation messages\")\n \n data = self.recieveDataConfirmation(1024)\n \n print(\"Confirming connections...\")\n \n for chunk in data:\n if chunk == \"error\":\n print(\"Connections could not be confirmed\")\n self.closeConnections()\n sys.exit()\n \n \n # really need to find a better way to do this\n time.sleep(1) \n print(\"Connections confirmed.\")\n\n\n","sub_path":"Communicator.py","file_name":"Communicator.py","file_ext":"py","file_size_in_byte":8920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305378070","text":"from builtins import object\nimport oauth1\n\nclass RequestValidatorMixin(object):\n '''\n A 'mixin' for OAuth request validation.\n '''\n def __init__(self):\n super(RequestValidatorMixin, self).__init__()\n\n self.oauth_server = oauth1.Server()\n self.signature_method = oauth1.SignatureMethod_HMAC_SHA1()\n #self.signature_method = SignatureMethod_Binary_HMAC_SHA1()\n self.oauth_server.add_signature_method(self.signature_method)\n self.oauth_consumer = oauth1.Consumer(\n self.consumer_key, self.consumer_secret)\n\n def is_valid_request(self, request, parameters={},\n fake_method=None, handle_error=True):\n '''\n Validates an OAuth request using the python-oauth1 library:\n https://github.com/UOC/python-oauth2\n\n '''\n try:\n # Set the parameters to be what we were passed earlier\n # if we didn't get any passed to us now\n\n if not parameters and hasattr(self, 'params'):\n parameters = self.params\n\n method, url, headers, parameters = self.parse_request(\n request, parameters, fake_method)\n\n oauth_request = oauth1.Request.from_request(\n method,\n url,\n headers=headers,\n parameters=parameters)\n\n self.oauth_server.verify_request(\n oauth_request, self.oauth_consumer, {})\n\n except oauth1.MissingSignature as e:\n if handle_error:\n return False\n else:\n raise e\n except oauth1.Error as e:\n key, base = self.signature_method.signing_base(oauth_request, self.oauth_consumer, {})\n sign = self.signature_method.sign(oauth_request, self.oauth_consumer, {})\n raise Exception(\"signature: %s failed with param %s with base string %s\" % (sign, oauth_request.get('oauth_signature'), base))\n\n # Signature was valid\n return True\n\n def parse_request(self, request, parameters):\n '''\n This must be implemented for the framework you're using\n\n Returns a tuple: (method, url, headers, parameters)\n method is the HTTP method: (GET, POST)\n url is the full absolute URL of the request\n headers is a dictionary of any headers sent in the request\n parameters are the parameters sent from the LMS\n '''\n raise NotImplemented\n\n def valid_request(self, request):\n '''\n Check whether the OAuth-signed request is valid and throw error if not.\n '''\n self.is_valid_request(request, parameters={}, handle_error=False)\n\n\nclass FlaskRequestValidatorMixin(RequestValidatorMixin):\n '''\n A mixin for OAuth request validation using Flask\n '''\n\n def parse_request(self, request, parameters=None, fake_method=None):\n '''\n Parse Flask request\n '''\n return (request.method,\n request.url,\n request.headers,\n request.form.copy())\n\n\nclass DjangoRequestValidatorMixin(RequestValidatorMixin):\n '''\n A mixin for OAuth request validation using Django\n '''\n\n def parse_request(self, request, parameters, fake_method=None):\n '''\n Parse Django request\n '''\n return (fake_method or request.method,\n 
request.build_absolute_uri(),\n request.META,\n (dict(iter(request.POST.items()))\n if request.method == 'POST'\n else parameters))\n\n\nclass WebObRequestValidatorMixin(RequestValidatorMixin):\n '''\n A mixin for OAuth request validation using WebOb\n '''\n def parse_request(self, request, parameters=None, fake_method=None):\n '''\n Parse WebOb request\n '''\n return (request.method,\n request.url,\n request.headers,\n request.POST.mixed())\n\n\nclass TornadoRequestValidatorMixin(RequestValidatorMixin):\n \"\"\"\n A mixin for OAuth request validation using Tornado\n \"\"\"\n\n def parse_request(self, request, parameters=None, fake_method=None):\n \"\"\"\n Parse Tornado request\n \"\"\"\n return (request.request.method,\n request.request.full_url(),\n request.request.headers,\n {key: request.get_argument(key) for key in request.request.arguments}.copy())\n\n\nclass SignatureMethod_Binary_HMAC_SHA1(oauth1.SignatureMethod_HMAC_SHA1):\n\n def check(self, request, consumer, token, signature):\n \"\"\"Returns whether the given signature is the correct signature for\n the given consumer and token signing the given request.\"\"\"\n built = self.sign(request, consumer, token)\n return built == signature.encode()\n","sub_path":"ims_lti_py/request_validator.py","file_name":"request_validator.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"288819704","text":"\nimport re\nimport commands\nimport struct\n\n\nclass LibraryData(object):\n def __init__(self, name, start, end, is_virtual=False, symbols=None):\n self.name = name\n self.start = start\n self.end = end\n self.is_virtual = is_virtual\n if symbols is None:\n symbols = []\n self.symbols = symbols\n\n def read_object_data(self, start_addr=0, reader=None):\n if self.is_virtual:\n return\n self.symbols = read_object(reader, self.name, start_addr)\n return self.symbols\n\n def get_symbols_from(self, cached_lib):\n symbols = []\n for (addr, name) in cached_lib.symbols:\n symbols.append((addr - cached_lib.start + self.start, name))\n self.symbols = symbols\n\n def __repr__(self):\n return '' % (self.name, self.start,\n self.end)\n\n\ndef read_object(reader, name, lib_start_addr, repeat=True):\n if reader is None:\n out = commands.getoutput('nm -n \"%s\"' % name)\n else:\n out = reader(name)\n lines = out.splitlines()\n symbols = []\n for line in lines:\n parts = line.split()\n if len(parts) != 3:\n continue\n start_addr, tp, name = parts\n if tp in ('t', 'T') and not name.startswith('__gcmap'):\n start_addr = int(start_addr, 16) + lib_start_addr\n symbols.append((start_addr, name))\n symbols.sort()\n if repeat and not symbols:\n return read_object(reader, '/usr/lib/debug' + name, lib_start_addr,\n False)\n return symbols\n\n\ndef read_ranges(data):\n ranges = []\n for line in data.splitlines():\n parts = re.split(\"\\s+\", line)\n name = parts[-1]\n start, end = parts[0].split('-')\n start = int('0x' + start, 16)\n end = int('0x' + end, 16)\n if name: # don't map anonymous memory, JIT code will be there\n ranges.append(LibraryData(name, start, end))\n return ranges\n\ndef read_word(fileobj):\n b = fileobj.read(8)\n r = int(struct.unpack('Q', b)[0])\n return r\n\ndef read_string(fileobj):\n lgt = int(struct.unpack('Q', fileobj.read(8))[0])\n return fileobj.read(lgt)\n\nMARKER_STACKTRACE = '\\x01'\nMARKER_VIRTUAL_IP = '\\x02'\nMARKER_TRAILER = '\\x03'\nMARKER_INTERP_NAME = '\\x04'\n\ndef read_prof(fileobj, virtual_ips_only=False): #\n assert 
read_word(fileobj) == 0 # header count\n assert read_word(fileobj) == 3 # header size\n assert read_word(fileobj) == 0 # version?\n period = read_word(fileobj)\n assert read_word(fileobj) == 0\n\n virtual_ips = []\n profiles = []\n interp_name = None\n\n while True:\n marker = fileobj.read(1)\n if marker == MARKER_STACKTRACE:\n count = read_word(fileobj)\n # for now\n assert count == 1\n depth = read_word(fileobj)\n assert depth <= 2**16, 'stack strace depth too high'\n trace = []\n if virtual_ips_only:\n fileobj.read(8 * depth)\n else:\n for j in range(depth):\n pc = read_word(fileobj)\n if j > 0 and pc > 0:\n pc -= 1\n trace.append(pc)\n profiles.append((trace, 1))\n elif marker == MARKER_INTERP_NAME:\n assert not interp_name, \"Dual interpreter name header\"\n lgt = ord(fileobj.read(1))\n interp_name = fileobj.read(lgt)\n elif marker == MARKER_VIRTUAL_IP:\n unique_id = read_word(fileobj)\n name = read_string(fileobj)\n virtual_ips.append((unique_id, name))\n elif marker == MARKER_TRAILER:\n if not virtual_ips_only:\n symmap = read_ranges(fileobj.read())\n break\n else:\n assert not marker\n symmap = []\n break\n virtual_ips.sort() # I think it's sorted, but who knows\n if virtual_ips_only:\n return virtual_ips\n return period, profiles, virtual_ips, symmap, interp_name\n","sub_path":"vmprof/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"66341970","text":"#!/usr/bin/env python3\r\nimport numpy as np\r\nimport math as mt\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom fractions import Fraction\r\n\r\ndef doChart(results):\r\n # Make a data frame\r\n\r\n plt.style.use('classic')\r\n plt.figure(facecolor=\"white\")\r\n # create a color palette\r\n #results = [[['1/64', '1/32', '1/16', '1/8', '1/4'], [None,1.202179402571835, 1.341384809418804, 1.3913354383878356, 1.3573656829732024, 1.1685441202263724, None, None, None, None]], [['1/64', '1/32', '1/16', '1/8', '1/4'], [None,1.3942670643932806, 1.4097935183934824, 1.4051284156665138, 1.3520189579084085, 1.1597407299727704, None, None, None, None]], [['1/128', '1/64', '1/32', '1/16'], [1.5111044102236884, 1.5060417012130396, 1.4875024126057848, 1.4531187309974394, None,None, None,None,None, None]], [['1/4', '1/2', '1','2','4'], [None,None,None,None,None,1.441065421881735, 1.5199785954245104, 1.5131838819899621, 1.397620046087846, 1.1462550058552963]], [['1/16', '1/8', '1/4','1/2'], [None,None,None,1.3519056005823231, 1.4464883429971844, 1.491400976325972, 1.4652306902400718, None,None,None]]]\r\n palette = plt.get_cmap('Set1')\r\n \r\n labels = ['1/128','1/64', '1/32', '1/16', '1/8', '1/4', '1/2','1','2','4']\r\n legendLabels = ['greedy','greedy(alpha)', 'greedy (optimistic)','ucb','gradient']\r\n plt.xticks(range(len(labels)), labels)\r\n # multiple line plot\r\n num=0\r\n #for column in df.drop('x', axis=1):\r\n # num+=1\r\n # plt.plot(df['x'], df[column], marker='', color=palette(num), linewidth=3, alpha=1, label=column)\r\n for i in range(len(results)):\r\n #labels = results[i][0]\r\n #plt.xticks(range(len(results[i][1])), labels)\r\n plt.plot(results[i][1], color = palette(num), linewidth=3, alpha=1, label=legendLabels[i])\r\n num += 1\r\n \r\n # Add legend\r\n plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1),\r\n ncol=5)\r\n \r\n # Add titles\r\n plt.title(\"Results\", loc='center', fontsize=14, fontweight=0, color='black')\r\n plt.ylabel(\"Average reward\",color='black')\r\n 
plt.savefig(\"result.png\")\r\n\r\n\r\ndef doThings():\r\n average_rewards = []\r\n for episode in range(args.episodes):\r\n env.reset()\r\n\r\n # TODO: Initialize required values (depending on mode).\r\n whatIKnow = args.initial*np.ones(args.bandits,dtype=float)\r\n whatIKnowC = np.ones(args.bandits, dtype = int)\r\n t = 1\r\n \r\n average_rewards.append(0)\r\n done = False\r\n while not done:\r\n res = None\r\n # TODO: Action selection according to mode\r\n if args.mode == \"greedy\":\r\n greedy = np.random.randint(0, args.epsilon - 1)\r\n if(greedy == 0):\r\n action = np.random.randint(0, args.bandits - 1)\r\n else:\r\n action = whatIKnow.argmax(axis = 0)\r\n elif args.mode == \"ucb\":\r\n forArgMax=countUCB(whatIKnow,whatIKnowC,t)\r\n action = forArgMax.argmax(axis = 0)\r\n t += 1\r\n elif args.mode == \"gradient\":\r\n res = softmax(whatIKnow)\r\n action = np.random.choice(args.bandits,1, p=res)[0]\r\n\r\n _, reward, done, _ = env.step(action)\r\n average_rewards[-1] += reward / args.episode_length\r\n\r\n # TODO: Update parameters\r\n if args.mode != \"gradient\" :\r\n whatIKnowC[action] += 1\r\n whatIKnow[action] = update(whatIKnow, whatIKnowC[action],action, reward)\r\n else :\r\n indicate = np.zeros(args.bandits, dtype=int)\r\n indicate[action] = 1\r\n whatIKnow = whatIKnow + args.alpha*reward*(indicate - res)\r\n \r\n\r\n # Print out final score as mean and variance of all obtained rewards.\r\n meanRew = np.mean(average_rewards)\r\n print(\"Final score: {}, variance: {}\".format(meanRew, np.var(average_rewards)))\r\n return meanRew\r\n\r\ndef update(rewards, count, action, reward):\r\n if args.mode == \"greedy\" :\r\n if args.alpha == 0:\r\n a = 1./count\r\n else:\r\n a = args.alpha\r\n return rewards[action] + a*(reward - rewards[action])\r\n elif args.mode == \"ucb\" :\r\n return rewards[action] + (1./count)*(reward - rewards[action])\r\n\r\ndef countUCB(rewards,counts,t):\r\n res = rewards\r\n res2 = np.reciprocal(counts.astype(float))\r\n# print(res)\r\n# print(args.c)\r\n return res + args.c * np.sqrt(mt.log(t,2)*res2)\r\n\r\ndef softmax(Ht):\r\n ex = np.exp(Ht)\r\n exSum = sum(ex)\r\n return ex/float(exSum)\r\n \r\n \r\n\r\nclass MultiArmedBandits():\r\n def __init__(self, bandits, episode_length):\r\n self._bandits = []\r\n for _ in range(bandits):\r\n self._bandits.append(np.random.normal(0., 1.)) #stredni hodntoty banditu\r\n self._done = True\r\n self._episode_length = episode_length\r\n #print(\"Initialized {}-armed bandit, maximum average reward is {}\".format(bandits, np.max(self._bandits)))\r\n\r\n def reset(self):\r\n self._done = False\r\n self._trials = 0\r\n return None\r\n\r\n def step(self, action):\r\n if self._done:\r\n raise ValueError(\"Cannot step in MultiArmedBandits when there is no running episode\")\r\n self._trials += 1\r\n self._done = self._trials == self._episode_length\r\n reward = np.random.normal(self._bandits[action], 1.) 
#nahodna odmena z normal dist s jejich stredni hodnotou a rozptylem 1\r\n return None, reward, self._done, {}\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n # Fix random seed\r\n np.random.seed(42)\r\n\r\n # Parse arguments\r\n import argparse\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--bandits\", default=10, type=int, help=\"Number of bandits.\")\r\n parser.add_argument(\"--episodes\", default=1000, type=int, help=\"Training episodes.\")\r\n parser.add_argument(\"--episode_length\", default=1000, type=int, help=\"Number of trials per episode.\")\r\n\r\n parser.add_argument(\"--mode\", default=\"ucb\", type=str, help=\"Mode to use -- greedy, ucb and gradient.\")\r\n parser.add_argument(\"--alpha\", default=0, type=float, help=\"Learning rate to use (if applicable).\")\r\n parser.add_argument(\"--c\", default=1., type=float, help=\"Confidence level in ucb.\")\r\n parser.add_argument(\"--epsilon\", default= 64, type=float, help=\"Exploration factor (if applicable).\")\r\n parser.add_argument(\"--initial\", default=0, type=float, help=\"Initial value function levels.\")\r\n args = parser.parse_args()\r\n\r\n env = MultiArmedBandits(args.bandits, args.episode_length)\r\n\r\n results = []\r\n# \r\n args.mode = \"greedy\"\r\n nove = []\r\n #greedy\r\n x = ['1/64', '1/32', '1/16', '1/8', '1/4']\r\n y =[None]\r\n args.alpha = 0\r\n args.initial = 0\r\n for e in [64,32, 16, 8, 4]:\r\n args.epsilon = e\r\n res = doThings()\r\n y.append(res)\r\n y.extend([None,None,None,None])\r\n nove.append(x)\r\n nove.append(y)\r\n results.append(nove)\r\n \r\n \r\n #greedy with alpha\r\n args.mode = \"greedy\"\r\n nove = []\r\n x = ['1/64', '1/32', '1/16', '1/8', '1/4']\r\n y =[None]\r\n args.alpha = 0.15\r\n args.initial = 0\r\n for e in [64,32, 16, 8, 4]:\r\n args.epsilon = e\r\n res = doThings()\r\n y.append(res)\r\n y.extend([None,None,None,None])\r\n nove.append(x)\r\n nove.append(y)\r\n results.append(nove)\r\n \r\n #greedy with alpha and initial\r\n nove = []\r\n x = ['1/128','1/64', '1/32', '1/16']\r\n y =[]\r\n args.alpha = 0.15\r\n args.initial = 1\r\n for e in [128, 64, 32, 16]:\r\n args.epsilon = e\r\n res = doThings()\r\n y.append(res)\r\n y.extend([None,None,None,None,None,None])\r\n nove.append(x)\r\n nove.append(y)\r\n results.append(nove)\r\n# \r\n #ucb\r\n nove = []\r\n x = ['1/4', '1/2', '1', '2', '4']\r\n y =[None,None,None,None,None]\r\n args.initial = 0\r\n args.alpha = 0\r\n args.mode = \"ucb\"\r\n for c in [1./4,1./2,1.,2.,4.]:\r\n args.c = c\r\n res = doThings()\r\n x.append(c)\r\n y.append(res)\r\n nove.append(x)\r\n nove.append(y)\r\n results.append(nove)\r\n \r\n #gradient\r\n nove = []\r\n x = ['1/16', '1/8', '1/4','1/2']\r\n y =[None,None,None]\r\n args.mode = \"gradient\"\r\n args.initial = 0\r\n for a in [16,8,4,2]:\r\n args.alpha = np.reciprocal(float(a))\r\n res = doThings()\r\n x.append(Fraction(1,a))\r\n y.append(res)\r\n y.extend([None,None,None])\r\n nove.append(x)\r\n nove.append(y)\r\n results.append(nove)\r\n \r\n print(results) \r\n doChart(results)\r\n \r\n \r\n\r\n \r\n \r\n ","sub_path":"DeepReinforcement/prvnictyriukoly/Doubravova01D.py","file_name":"Doubravova01D.py","file_ext":"py","file_size_in_byte":8619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151102361","text":"import click\nfrom click import Abort\nfrom scrapy.crawler import CrawlerProcess\n\nfrom scraper.virtual_account import BCAScrapy, BNIScrapy, BRIScrapy, PermataScrapy, MandiriScrapy\n\n\n@click.group()\ndef cli():\n 
pass\n\n\n@click.command()\n@click.argument('name')\n@click.argument('va_number')\n@click.argument('biller_code', required=False)\ndef va(name, va_number, biller_code=None):\n process = CrawlerProcess()\n\n if name == 'bca':\n process.crawl(BCAScrapy, va_number=va_number)\n\n if name == 'bri':\n process.crawl(BRIScrapy, va_number=va_number)\n\n if name == 'bni':\n process.crawl(BNIScrapy, va_number=va_number)\n\n if name == 'permata':\n process.crawl(PermataScrapy, va_number=va_number)\n\n if name == 'mandiri':\n biller_code = biller_code if biller_code else click.prompt('Input Biller Code:')\n process.crawl(MandiriScrapy, va_number=va_number, biller_code=biller_code)\n\n process.start()\n\n\ncli.add_command(va)\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"507640089","text":"# -*- encoding:utf-8\nimport dpkt\nimport socket\nimport scapy\nfrom scapy.all import *\nfrom scapy.utils import PcapReader\nimport numpy as np \nimport pandas as pd\nimport random\nimport csv\nimport math\nfrom scipy.sparse import csr_matrix, hstack\nfrom sklearn import svm\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.model_selection import train_test_split\n\n# 基于端口号的分类。单分类器分类\n\nf = open('../../../pcaptest_20190516/0.pcap', 'rb')\npcap = dpkt.pcap.Reader(f)\n\n\ndef inet_to_str(inet):\n try:\n return socket.inet_ntop(socket.AF_INET,inet)\n except:\n return socket.inet_ntop(socket.AF_INET6,inet)\n\nfeature_list = []\nnum = 0\nfor ts,buf in pcap:\n\tfeature =[]\n\tnum += 1\n\tprint(num)\n\tethData = dpkt.ethernet.Ethernet(buf) # 物理层\n\tipData = ethData.data # 网络层\n\ttransData = ipData.data # 传输层\n\tappData = transData.data # 应用层\n\tif num > 10000:\n\t\tbreak\n\tfeature.append(transData.dport) # 每个包的属性列表暂时只添加包的目的端口号\n\tfeature_list.append(feature) # 通过dst port分析\n\nprint(\"--------------------------------------\")\nprint(\"pcap dst port type num:\", len(feature_list))\n\nport_official_csv = pd.read_csv(\"./service-names-port-numbers.csv\")\n\nport_official_useful = {}\nfor i in range(0, len(port_official_csv)):\n if (port_official_csv['Service Name'][i] is not np.nan) and (port_official_csv['Port Number'][i] is not np.nan):\n port_official_useful[port_official_csv['Port Number'][i]] = port_official_csv['Service Name'][i]\n\nprint(\"--------------------------------------\")\nprint(\"official useful port number:\",len(port_official_useful))\n\napp_list = []\nfor i in range(0, len(feature_list)):\n\tx = str(feature_list[i][0]) # 第一个属性为端口号\n\tif (x in port_official_useful.keys()):\n\t\tapp_list.append(port_official_useful[x]) \n\telse:\n\t\tapp_list.append('unknown')\n\nprint(\"--------------------------------------\")\nprint(\"label set num:\",len(app_list))\n\nfeatureTrain, featureTest, appTrain, appTest = train_test_split(feature_list, app_list, test_size = 0.2, random_state = 42)\n\n# KNN Classifier \nknnClf = KNeighborsClassifier()\nknnClf.fit(featureTrain, appTrain)\nknnResult = knnClf.predict(featureTest)\n\nknnAcc = 0\nfor i in range(0, len(knnResult)):\n\tif 
knnResult[i] == appTest[i]:\n\t\tknnAcc += 1\nprint(\"KNN Accuracy:\", (knnAcc*1.0)/len(appTest))\n\n# Logistic Regression Classifier \nlrClf = LogisticRegression(penalty='l2')\nlrClf.fit(featureTrain, appTrain)\nlrResult = lrClf.predict(featureTest)\n\nlrAcc = 0\nfor i in range(0, len(lrResult)):\n\tif lrResult[i] == appTest[i]:\n\t\tlrAcc += 1\nprint(\"Logistic Regression Accuracy:\", (lrAcc*1.0)/len(appTest))\n\n# Random Forest Classifier\nrfClf = RandomForestClassifier(n_estimators=8)\nrfClf.fit(featureTrain, appTrain)\nrfResult = rfClf.predict(featureTest)\n\nrfAcc = 0\nfor i in range(0, len(rfResult)):\n\tif rfResult[i] == appTest[i]:\n\t\trfAcc += 1\nprint(\"Random Forest Accuracy:\", (rfAcc*1.0)/len(appTest))\n\n# Decision Tree Classifier\ndtClf = DecisionTreeClassifier()\ndtClf.fit(featureTrain, appTrain)\ndtResult = dtClf.predict(featureTest)\n\ndtAcc = 0\nfor i in range(0, len(dtResult)):\n\tif dtResult[i] == appTest[i]:\n\t\tdtAcc += 1\nprint(\"Decsion Tree Accuracy:\", (dtAcc*1.0)/len(appTest))\n\n# GBDT(Gradient Boosting Decision Tree) Classifier \ngbdtClf = GradientBoostingClassifier(n_estimators=200)\ngbdtClf.fit(featureTrain, appTrain)\ngbdtResult = gbdtClf.predict(featureTest)\n\ngbdtAcc = 0\nfor i in range(0, len(gbdtResult)):\n\tif gbdtResult[i] == appTest[i]:\n\t\tgbdtAcc += 1\nprint(\"GBDT Accuracy:\", (gbdtAcc*1.0)/len(appTest))\n\n#AdaBoost Classifier\nadaClf = AdaBoostClassifier()\nadaClf.fit(featureTrain, appTrain)\nadaResult = adaClf.predict(featureTest)\n\nadaAcc = 0\nfor i in range(0, len(adaResult)):\n\tif adaResult[i] == appTest[i]:\n\t\tadaAcc += 1\nprint(\"AdaBoost Accuracy:\", (adaAcc*1.0)/len(appTest))\n\n# GaussianNB\ngaussClf = GaussianNB()\ngaussClf.fit(featureTrain, appTrain)\ngaussResult = gaussClf.predict(featureTest)\n\ngaussAcc = 0\nfor i in range(0, len(gaussResult)):\n\tif gaussResult[i] == appTest[i]:\n\t\tgaussAcc += 1\nprint(\"GaussianNB Accuracy:\", (gaussAcc*1.0)/len(appTest))\n\n# Multinomial Naive Bayes Classifier\nmnbClf = MultinomialNB(alpha = 0.01)\nmnbClf.fit(featureTrain, appTrain)\nmnbResult = mnbClf.predict(featureTest)\n\nmnbAcc = 0\nfor i in range(0, len(mnbResult)):\n\tif mnbResult[i] == appTest[i]:\n\t\tmnbAcc += 1\nprint(\"Multinomial Naive Bayes Accuracy:\", (mnbAcc*1.0)/len(appTest))","sub_path":"xumw/C4/网络技术挑战赛选拔赛材料_第二阶段/MachineLearingSolve/single_machine_learning.py","file_name":"single_machine_learning.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29840002","text":"# code from https://stackoverflow.com/questions/46170577/find-closest-line-to-each-point-on-big-dataset-possibly-using-shapely-and-rtree\r\nimport geojson\r\nimport sys\r\nimport os.path\r\n\r\nfrom typing import Tuple, List\r\nfrom rtree import index\r\nfrom tqdm import tqdm\r\nfrom networkx import DiGraph\r\nfrom shapely.geometry import Polygon, Point\r\nfrom roadmaptools.printer import print_info\r\nfrom roadmaptools.road_structures import LinestringEdge\r\nfrom roadmaptools.graph import RoadGraph\r\n\r\n\r\nclass RoadGraphRtree:\r\n def __init__(self, road_graph: RoadGraph, search_size: int = 500, path: str = None):\r\n self.search_size = search_size\r\n self.index = self._build_index(road_graph, path)\r\n\r\n @staticmethod\r\n def _build_index(road_graph: RoadGraph, path: str = None):\r\n if path:\r\n cache_ready = os.path.isfile(path + \".idx\")\r\n idx = index.Index(path)\r\n else:\r\n cache_ready = False\r\n idx = index.Index()\r\n if not 
cache_ready:\r\n print_info(\"Creating R-tree from geojson roadmap\")\r\n for from_node, to_node, data in tqdm(road_graph.graph.edges(data=True), desc=\"processing edges\"):\r\n edge: LinestringEdge = data[\"edge\"]\r\n # data[\"attr\"][\"from\"] = from_node\r\n # data[\"attr\"][\"to\"] = to_node\r\n idx.insert(data[\"id\"], edge.linestring.bounds, edge)\r\n if path:\r\n idx.close()\r\n idx = index.Index(path)\r\n return idx\r\n\r\n def get_nearest_edge(self, point: Point):\r\n search_bounds = Point(point).buffer(self.search_size).bounds\r\n candidates = self.index.intersection(search_bounds, objects='raw')\r\n min_distance = sys.maxsize\r\n nearest = None\r\n for candidate in candidates:\r\n edge: LinestringEdge = candidate\r\n distance = point.distance(edge.linestring)\r\n if distance < min_distance:\r\n min_distance = distance\r\n nearest = edge\r\n\r\n if not nearest:\r\n print_info(\"No edge found in specified distance ({} m).\".format(self.search_size))\r\n\r\n envelope = Polygon(((search_bounds[0], search_bounds[3]), (search_bounds[2], search_bounds[3]),\r\n (search_bounds[2], search_bounds[1]), (search_bounds[0], search_bounds[1])))\r\n if not envelope.intersects(nearest.linestring):\r\n print_info(\"solution does not have to be exact\")\r\n\r\n return nearest\r\n\r\n def get_edges_in_area(self, area_bounds: Polygon) -> List[LinestringEdge]:\r\n # edges whose bounding box intersects the area\r\n potential_edges_in_area = self.index.intersection(area_bounds.bounds, objects='raw')\r\n\r\n edges_in_area = []\r\n for candidate in potential_edges_in_area:\r\n if area_bounds.intersects(candidate.linestring):\r\n edges_in_area.append(candidate)\r\n return edges_in_area\r\n","sub_path":"roadmaptools/road_graph_rtree.py","file_name":"road_graph_rtree.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323021293","text":"#!/usr/bin/env python3\nfrom mq_server_base import MessageQueueServerBase,MessageQueueClientBase\nimport argparse\n\nimport os, sys\nimport time\nimport re\nimport gc\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom config import config\nfrom super_model import SuperNetwork\nfrom train import infer\nimport numpy as np\nimport functools\nprint=functools.partial(print,flush=True)\n\nsys.path.append(\"../..\")\nfrom utils import *\n\nclass TorchMonitor(object):\n def __init__(self):\n self.obj_set=set()\n for obj in gc.get_objects():\n if torch.is_tensor(obj) and obj not in self.obj_set:\n self.obj_set.add(obj)\n\n def find_leak_tensor(self):\n for obj in gc.get_objects():\n if torch.is_tensor(obj) and obj not in self.obj_set:\n print(obj.size())\n\n\nclass TestClient(MessageQueueClientBase):\n def __init__(self,*,random):\n if random:\n super().__init__(config.host, config.port, config.username,\n config.random_test_send_pipe, config.random_test_recv_pipe)\n else:\n super().__init__(config.host, config.port, config.username,\n config.test_send_pipe, config.test_recv_pipe)\n\n def send(self,cand):\n assert isinstance(cand,tuple)\n return super().send(cand)\n\nclass TestServer(MessageQueueServerBase):\n def __init__(self, batchsize, train_dir, val_dir,*,random):\n if random:\n super().__init__(config.host, config.port, config.username, \n config.random_test_send_pipe, config.random_test_recv_pipe)\n else:\n super().__init__(config.host, config.port, config.username, \n config.test_send_pipe, config.test_recv_pipe)\n 
self.model = None\n self.criterion = CrossEntropyLabelSmooth(1000, 0.1)\n self.criterion = self.criterion.cuda()\n\n # Prepare data\n train_loader = get_train_dataloader(train_dir, batchsize, 0, 100000)\n self.train_dataprovider = DataIterator(train_loader)\n val_loader = get_val_dataloader(val_dir)\n self.val_dataprovider = DataIterator(val_loader)\n\n def eval(self, cand):\n print('cand={}'.format(cand))\n self.model = SuperNetwork().cuda()\n assert(os.path.exists(config.net_cache))\n load(self.model, config.net_cache)\n res = self._test_candidate(cand)\n return res\n\n def _test_candidate(self, cand):\n res = dict() \n try:\n t0 = time.time()\n print('starting inference...')\n Top1_acc = self._inference(np.array(cand).astype(np.int))\n print('time: {}s'.format(time.time() - t0))\n res = {'status': 'success', 'acc': Top1_acc}\n return res\n except:\n import traceback\n traceback.print_exc()\n os._exit(1)\n res['status'] = 'failure'\n return res\n \n def _inference(self, cand):\n t0 = time.time()\n print('testing model {} ..........'.format(cand))\n recalculate_bn(self.model, cand, self.train_dataprovider)\n torch.cuda.empty_cache()\n recal_bn_time = time.time() - t0\n test_top1_acc, _ = infer(self.val_dataprovider, self.model, self.criterion, cand)\n testtime = time.time() - t0\n print('|=> valid: accuracy = {:.3f}%, total_test_time = {:.2f}s, recal_bn_time={:.2f}s, cand = {}'.format(test_top1_acc, testtime, recal_bn_time, cand))\n return test_top1_acc\n\n\ndef main():\n parser = argparse.ArgumentParser(\"ImageNet\")\n parser.add_argument('-b', '--batch_size', type=int, default=512)\n parser.add_argument('-p', '--process', type=int, default=1)\n parser.add_argument('-r', '--reset', action='store_true')\n parser.add_argument('--random', action='store_true')\n parser.add_argument('--train_dir', type=str, default='../../data/train', help='path to training dataset')\n parser.add_argument('--test_dir', type=str, default='../../data/test', help='path to test dataset')\n args=parser.parse_args()\n train_server = TestServer(args.batch_size, args.train_dir, args.test_dir, random=args.random)\n train_server.run(args.process, reset_pipe=args.reset)\n \nif __name__ == \"__main__\":\n try:\n main()\n except:\n import traceback\n traceback.print_exc()\n print(flush=True)\n os._exit(1)\n","sub_path":"NAS/AngleNAS/FairNAS/searching/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"40050198","text":"import argparse\nfrom pynn_object_serialisation.functions import extract_parameters\n\n#Parses filename from arg\nparser = argparse.ArgumentParser(\n description='parameter extractor argparser',\n formatter_class=argparse.RawTextHelpFormatter)\n\nparser.add_argument('model', type=str,\n help='model to extract parameters from')\n\nparser.add_argument('output_dir', type=str,\n help='the root directory for the output')\n\nargs = parser.parse_args()\n\n\n#runs the parameter extractor on them\ntry:\n extract_parameters(args.model, args.output_dir)\nexcept Exception as e:\n print(\"Something went wrong.\\n\")\n print(e)\n","sub_path":"pynn_object_serialisation/parameter_extractor.py","file_name":"parameter_extractor.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"36637833","text":"import copy\n\n\ndef count_S(T):\n return sum(T)\n\n\ndef count_P(T1, T2):\n P = 0\n for i in 
range(len(T1)):\n P += (T1[i] * T2[i])\n return P\n\n\ndef count_T(p, xl, xr, yd, yu):\n x = p[0]\n y = p[1]\n T = [0, 0, 0, 0]\n T[0] = 1 if x < xl else 0\n T[1] = 1 if x > xr else 0\n T[2] = 1 if y < yd else 0\n T[3] = 1 if y > yu else 0\n return T\n\n\ndef sort_cutter(arr):\n if arr[0][0] > arr[1][0]:\n arr[0], arr[1] = arr[1], arr[0]\n return arr\n\n\ndef easy_cut(xl, xr, yd, yu, p1, p2):\n T1 = count_T(p1, xl, xr, yd, yu)\n T2 = count_T(p2, xl, xr, yd, yu)\n\n FL = 0\n m = 99999 # Infinity\n\n S1 = count_S(T1)\n S2 = count_S(T2)\n\n Q = p1\n r1 = copy.deepcopy(p1)\n r2 = copy.deepcopy(p2)\n\n if (S1 == 0) and (S2 == 0):\n return B_shtrih(FL, r1, r2)\n\n P = count_P(T1, T2)\n if P != 0:\n return B(r1, r2)\n\n if S1 == 0:\n r1 = copy.deepcopy(p1)\n Q = copy.deepcopy(p2)\n i = 2\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu, False)\n\n if S2 == 0:\n r1 = copy.deepcopy(p2)\n Q = copy.deepcopy(p1)\n i = 2\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu, False)\n\n i = 0\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu)\n\n\ndef B_shtrih(FL, p1, p2):\n if FL == 0:\n return True, p1, p2\n else:\n return False, p1, p2\n\n\ndef B(p1, p2):\n return B_shtrih(1, p1, p2)\n\n\ndef A_skip_1(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu, m):\n if m == 0:\n return B(r1, r2)\n\n if Q[1] < yd:\n x = (yd - Q[1]) / m + Q[0]\n\n if xl <= x and x <= xr:\n if i == 1:\n r1[0] = x\n r1[1] = yd\n else:\n r2[0] = x\n r2[1] = yd\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu)\n\n if Q[1] > yu:\n x = (yu - Q[1]) / m + Q[0]\n\n if xl <= x and x <= xr:\n if i == 1:\n r1[0] = x\n r1[1] = yu\n else:\n r2[0] = x\n r2[1] = yu\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu)\n\n return B(r1, r2)\n\n\ndef A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu, flag=True):\n if flag:\n i += 1\n if i > 2:\n return B_shtrih(FL, r1, r2)\n\n Q = p1 if i == 1 else p2\n\n if p1[0] == p2[0]:\n return A_skip_1(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu, 99999)\n\n m = (p2[1] - p1[1]) / (p2[0] - p1[0])\n\n if Q[0] < xl:\n y = m * (xl - Q[0]) + Q[1]\n\n if yd <= y and y <= yu:\n if i == 1:\n r1[0] = xl\n r1[1] = y\n else:\n r2[0] = xl\n r2[1] = y\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu)\n\n if Q[0] > xr:\n y = m * (xr - Q[0]) + Q[1]\n\n if yd <= y and y <= yu:\n if i == 1:\n r1[0] = xr\n r1[1] = y\n else:\n r2[0] = xr\n r2[1] = y\n return A(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu)\n\n return A_skip_1(FL, i, Q, p1, p2, r1, r2, xl, xr, yd, yu, m)\n","sub_path":"lab_7/simple_cut.py","file_name":"simple_cut.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"161748612","text":"from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport sys\nsys.path.append(os.getcwd())\nimport pickle\nimport pdb\nimport time\nimport numpy as np\nimport json\nimport progressbar\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\n\nfrom misc.metrics import *\nfrom misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate,decode_txt, sample_batch_neg, l2_norm\nfrom misc.dataLoader import Data\nimport misc.model as model\nimport datetime\nfrom tensorboardX import SummaryWriter\n\n\n\nparser = 
argparse.ArgumentParser()\n\nparser.add_argument('--input_video_h5', default='../Humour_dialog/data/video_feat.hdf5', help='path to dataset, now hdf5 file')\nparser.add_argument('--input_text_h5', default='../Humour_dialog/data/text_feat.hdf5', help='path to dataset, now hdf5 file')\nparser.add_argument('--input_json', default='../Humour_dialog/data/dict.json', help='path to dataset, now hdf5 file')\nparser.add_argument('--outf', default='./save', help='folder to output images and model checkpoints')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=0)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--save_iter', type=int, default=2, help='number of epochs to train for')\n\nparser.add_argument('--encoder', default='G_QIH_VGG', help='what encoder to use.')\nparser.add_argument('--model_path', default='', help='folder to output images and model checkpoints')\nparser.add_argument('--num_val', default=1000, help='number of image split out as validation set.')\n\n\n# parser.add_argument('--model_path_D', default='save/D.20-9-20/epoch_10.pth', help='folder to output images and model checkpoints')\n# parser.add_argument('--model_path_G', default='save/G_QIH_VGG.21-9-12/epoch_12.pth', help='folder to output images and model checkpoints')\nparser.add_argument('--model_path_D', default='', help='folder to output images and model checkpoints')\nparser.add_argument('--model_path_G', default='', help='folder to output images and model checkpoints')\n\n\nparser.add_argument('--niter', type=int, default=50, help='number of epochs to train for')\nparser.add_argument('--start_epoch', type=int, default=0, help='start of epochs to train for')\n\n\n\nparser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')\nparser.add_argument('--lr', type=float, default=0.00004, help='learning rate for, default=0.00005')\nparser.add_argument('--beta1', type=float, default=0.8, help='beta1 for adam. 
default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--verbose' , action='store_true', help='show the sampled caption')\n\nparser.add_argument('--conv_feat_size', type=int, default=4096, help='input batch size')\nparser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')\nparser.add_argument('--ninp', type=int, default=300, help='size of word embeddings')\nparser.add_argument('--nhid', type=int, default=512, help='humber of hidden units per layer')\nparser.add_argument('--nlayers', type=int, default=1, help='number of layers')\nparser.add_argument('--dropout', type=int, default=0.4, help='number of layers')\nparser.add_argument('--mos', action='store_true', help='whether to use Mixture of Softmaxes layer')\nparser.add_argument('--clip', type=float, default=5, help='gradient clipping')\nparser.add_argument('--margin', type=float, default=2, help='number of epochs to train for')\n\nparser.add_argument('--log_interval', type=int, default=50, help='how many iterations show the log info')\nparser.add_argument('--monte_carlo_simulations', default=100,help='Number of sampling')\nparser.add_argument('--ans_classes', default=100,help='Number of ans class for discriminitor')\n\nopt = parser.parse_args()\nprint(opt)\n\n\nopt.manualSeed = random.randint(1, 10000) # fix seed\n\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\nif opt.model_path != '':\n print(\"=> loading checkpoint '{}'\".format(opt.model_path))\n checkpoint = torch.load(opt.model_path)\n model_path = opt.model_path\n opt = checkpoint['opt']\n opt.start_epoch = checkpoint['epoch']\n opt.model_path = model_path\n opt.batchSize = 128\n opt.niter = 100\nelse:\n t = datetime.datetime.now()\n cur_time = '%s-%s-%s' %(t.day, t.month, t.hour)\n save_path = os.path.join(opt.outf, opt.encoder + '.' 
+ cur_time)\n try:\n os.makedirs(save_path)\n except OSError:\n pass\nwriter = SummaryWriter(save_path)\n####################################################################################\n# Data Loader\n####################################################################################\n\n\n\ndataset_train = Data(input_video_file=opt.input_video_h5, input_text_file=opt.input_text_h5,input_json=opt.input_json, data_split = 'Train')\ndataset_val = Data(input_video_file=opt.input_video_h5, input_text_file=opt.input_text_h5,input_json=opt.input_json, data_split = 'Val')\ndataset_test = Data(input_video_file=opt.input_video_h5, input_text_file=opt.input_text_h5,input_json=opt.input_json, data_split = 'Test')\n\n\ndataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=opt.batchSize,shuffle=True, num_workers=int(opt.workers))\ndataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=4,shuffle=False, num_workers=int(opt.workers))\ndataloader_test = torch.utils.data.DataLoader(dataset_test, batch_size=4,shuffle=False, num_workers=int(opt.workers))\n\n\n\n####################################################################################\n# Build the Model\n####################################################################################\n\n# vocab_size = dataset_train.vocab_size\n# ques_length = dataset_train.ques_length\n# ans_length = datdataset_trainaset.ans_length + 1\n# his_length = dataset_train.ques_length + dataset_train.ans_length\n# itow = dataset.itow_dialog\nimg_feat_size = opt.conv_feat_size\nout_features_size=1 #for BCE and don't forget to add sigmoid as last layer\nvocab_size = dataset_train.vocab_size\n\nprint('vocab_size = ',vocab_size)\nprint('init Generative and Discriminator model...')\nnetE_text = model._netE_text(vocab_size,opt.ninp, opt.nhid,out_features_size, opt.dropout)\nnetE_video = model._netE_video(img_feat_size, opt.nhid, opt.dropout)\nnetC=model.Classifier(2*opt.nhid, opt.nhid,out_features_size, opt.dropout)\ncritD =nn.BCELoss()# thisis discriminitor losss\nif opt.cuda:# ship to cuda, if has GPU\n netE_video.cuda()\n netE_text.cuda()\n netC.cuda()\n critD.cuda()\n\nif opt.model_path_G != '':\n print('Loading Generative model...')\n netE_video.load_state_dict(checkpoint['netE_video'])\n netE_text.load_state_dict(checkpoint['netE_text'])\n netC.load_state_dict(checkpoint['netC'])\n\n\n#########################################################################\n################ Train Model #####################################\n#########################################################################\n# training function\ndef train(epoch):\n global vid_input\n netE_video.train()\n netE_text.train()\n netC.train()\n \n lr = adjust_learning_rate(optimizerLM, epoch, opt.lr)\n # data_iter = iter(dataloader_train)\n\n average_loss = 0\n count = 0\n\n # while i < len(dataloader_train):\n for i, data in enumerate(dataloader_train): \n average_loss_temp = 0\n #print(\"IN\")\n # data = data_iter.next()\n video_data = data['video_data'].cuda()\n text_data = data['text_data']\n label = data['label'].type(torch.FloatTensor).cuda()\n dialog_id = data['dialog_id']\n dialog_turns = text_data[\"dialog_data\"]\n\n batch_size = video_data.size(0)\n # print('video_data',video_data.shape) #image torch.Size([4,15,4096])\n vid_input.data.resize_(video_data.size()).copy_(video_data)\n # print('img_input',img_input.shape)#img_input torch.Size([4,15,4096])\n vid_input = vid_input.view(-1,img_feat_size)\n # print('vid_input',vid_input.shape) #image 
torch.Size([4x15,4096])\n\n dialog_turns=dialog_turns.view(-1,40)\n\n video_logit = netE_video(vid_input, batch_size)\n text_logit = netE_text(dialog_turns,batch_size)\n video_text_logit=torch.cat((video_logit,text_logit),1)\n print(\"video_text_logit\", video_text_logit.shape)\n logit=netC(video_text_logit)\n # print(label, logit)\n # print(logit)\n # print(label.view(-1, 1))\n g_loss = critD(logit, label.view(-1, 1))\n\n\n # do backward.\n netE_video.zero_grad()\n netE_text.zero_grad()\n netC.zero_grad()\n \n g_loss.backward()\n optimizerLM.step()\n average_loss += g_loss.item()\n average_loss_temp += g_loss.item()\n if i % opt.log_interval == 0:\n print(\"step {} / {} (epoch {}), g_loss {:.5f},lr = {:.6f}\".format(i, len(dataloader_train), epoch, average_loss_temp/10, lr))\n average_loss_temp = 0\n # i = i + 1\n count = count + 1\n average_loss /= count\n \n return average_loss, lr\n\n\ndef val():\n global vid_input\n netE_video.eval()\n y_true = []\n y_pred = []\n y_prob = []\n data_iter_val = iter(dataloader_val)\n i = 0\n average_loss = 0\n count = 0\n final_f1_score = 0\n final_recall = 0\n final_precision = 0\n final_accuracy_score = 0\n\n for i, data in enumerate(dataloader_val): \n # data = data_iter_val.next()\n\n video_data = data['video_data'].cuda()\n text_data = data['text_data']\n label = data['label'].type(torch.FloatTensor).cuda()\n dialog_id = data['dialog_id']\n dialog_turns = text_data[\"dialog_data\"]\n batch_size = video_data.size(0)\n # print('video_data',video_data.shape) #image torch.Size([4,15,4096])\n vid_input.data.resize_(video_data.size()).copy_(video_data)\n # print('img_input',img_input.shape)#img_input torch.Size([4,15,4096])\n vid_input = vid_input.view(-1,img_feat_size)\n # print('vid_input',vid_input.shape) #image torch.Size([4x15,4096])\n\n dialog_turns=dialog_turns.view(-1,40)\n\n video_logit = netE_video(vid_input, batch_size)\n text_logit = netE_text(dialog_turns,batch_size)\n video_text_logit=torch.cat((video_logit,text_logit),1)\n logit=netC(video_text_logit)\n g_loss = critD(logit, label.view(-1, 1))\n\n average_loss += g_loss.item()\n count += 1\n y_prob.append(logit)\n logit[logit > 0.5] = 1\n logit[logit <= 0.5] = 0\n y_true.append(label.view(-1,1))\n y_pred.append(logit)\n\n y_true = torch.cat(y_true)\n y_pred = torch.cat(y_pred)\n y_prob = torch.cat(y_prob)\n y_true = y_true.cpu().data.numpy()\n y_pred = y_pred.cpu().data.numpy()\n y_prob = y_prob.cpu().data.numpy()\n final_accuracy_score = accuracy_score(y_true, y_pred)\n final_f1_score = f1_score(y_true, y_pred)\n final_precision = precision_score(y_true, y_pred)\n final_recall = recall_score(y_true, y_pred)\n final_roc_auc_score = roc_auc_score(y_true, y_prob)\n final_roc_curve = roc_curve(y_true, y_prob, pos_label=1)\n average_loss /= count\n print( \"acc:\", final_accuracy_score, \"precision:\" ,final_precision, \"recall:\",final_recall,\"roc_auc_score:\" ,final_roc_auc_score)\n\n \n\n return average_loss, final_accuracy_score, final_f1_score, final_recall, final_precision, final_roc_curve, final_roc_auc_score\n\n####################################################################################\n# Main\n####################################################################################\nvid_input = torch.FloatTensor(opt.batchSize, 15, 4096)\ndialog_turns = torch.FloatTensor(opt.batchSize, 15, 40)\ngt_index = torch.LongTensor(opt.batchSize)\n\nif opt.cuda:\n vid_input = vid_input.cuda()\n gt_index = gt_index.cuda()\n\nvid_input = Variable(vid_input)\ngt_index = 
Variable(gt_index)\ndialog_turns=Variable(dialog_turns)\n\noptimizerLM = optim.Adam([{'params': netE_video.parameters()}, {'params': netE_text.parameters()}, {'params': netC.parameters()} ], lr=opt.lr, betas=(opt.beta1, 0.999))\n\n# optimizer_D = optim.Adam([{'params': netW.parameters()},\n# {'params': netE.parameters()},\n# {'params': netD.parameters()},\n# {'params': netV.parameters()},\n# {'params': netA.parameters()}], lr=opt.lr, betas=(opt.beta1, 0.999))\n\nhistory = []\nhistory_pickle = []\ntrain_his = {}\n\nfor epoch in range(opt.start_epoch+1, opt.niter):\n t = time.time()\n train_loss_lm, lr = train(epoch)\n\n\n print('Evaluating ... ')\n val_loss, final_accuracy_score, final_f1_score, final_recall, final_precision, final_roc_curve, final_roc_auc_score = val()\n print ('Epoch: %d learningRate: %4f train loss gen: %4f train loss dis: %4f Time: %3f' % (epoch, lr, train_loss_lm,val_loss, time.time()-t))\n writer.add_scalars(\"Loss\",{\"Train_loss\":train_loss_lm, \"Val_loss\":val_loss}, epoch)\n writer.add_scalar(\"Accuracy\",final_accuracy_score , epoch)\n writer.add_scalar(\"f1_score\",final_f1_score , epoch)\n writer.add_scalar(\"precision\", final_precision, epoch)\n writer.add_scalar(\"recall\", final_recall, epoch)\n writer.add_scalar(\"roc_auc_score\", final_roc_auc_score, epoch)\n\n # R1 = np.sum(np.array(rank_G)==1) / float(len(rank_G))\n # R5 = np.sum(np.array(rank_G)<=5) / float(len(rank_G))\n # R10 = np.sum(np.array(rank_G)<=10) / float(len(rank_G))\n # ave = np.sum(np.array(rank_G)) / float(len(rank_G))\n # mrr = np.sum(1/(np.array(rank_G, dtype='float'))) / float(len(rank_G))\n # print ('Generator: %d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))\n # val_his_G = {'R1': R1, 'R5':R5, 'R10': R10, 'Mean':ave, 'mrr':mrr}\n\n # R1 = np.sum(np.array(rank_D)==1) / float(len(rank_D))\n # R5 = np.sum(np.array(rank_D)<=5) / float(len(rank_D))\n # R10 = np.sum(np.array(rank_D)<=10) / float(len(rank_D))\n # ave = np.sum(np.array(rank_D)) / float(len(rank_D))\n # mrr = np.sum(1/(np.array(rank_D, dtype='float'))) / float(len(rank_D))\n # print ('Discriminator: %d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))\n # val_his_D = {'R1': R1, 'R5':R5, 'R10': R10, 'Mean':ave, 'mrr':mrr}\n\n train_his = {'train_loss': train_loss_lm}\n val_his = {'val_loss': val_loss, 'accuracy_score':final_accuracy_score, \"f1_score\":final_f1_score, \"recall\":final_recall, \"precision\":final_precision,\"roc_auc_score\":final_roc_auc_score }\n val_his_pickle = {'val_loss': val_loss, 'accuracy_score':final_accuracy_score, \"f1_score\":final_f1_score, \"recall\":final_recall, \"precision\":final_precision,\"roc_auc_score\":final_roc_auc_score, \"roc_curve\":final_roc_curve }\n history.append({'epoch':epoch, 'train': train_his, 'val_his': val_his})\n history_pickle.append({'epoch':epoch, 'train': train_his, 'val_his': val_his_pickle})\n\n # saving the model.\n if epoch % opt.save_iter == 0:\n torch.save({'epoch': epoch,\n 'opt': opt,\n # 'netW': netW.state_dict(),\n \n 'netE_video': netE_video.state_dict(),\n 'netE_text' : netE_text.state_dict(),\n 'netC' : netC.state_dict() \n # 'netA': netA.state_dict(),\n # 'netG': netG.state_dict(),\n # 'netD': netD.state_dict()},\n # 'netE': netE.state_dict()},\n # 'netD': netD.state_dict()},\n },\n '%s/epoch_%d.pth' % (save_path, epoch))\n json.dump(history, open('%s/log.json' %(save_path), 'w'))\n pickle.dump(history_pickle, open('%s/log.pickle' %(save_path), 'wb')) # this is 
the problem with pytorch version 4 it will work on version 3 or less\n\nwriter.close() \n# Error of pytorch version 4\n# Traceback (most recent call last):\n# File \"train/train_GD.py\", line 599, in \n# json.dump(history, open('%s/log.json' %(save_path), 'w'))\n# File \"/home/badri/anaconda3/lib/python3.6/json/__init__.py\", line 179, in dump\n# for chunk in iterable:\n# File \"/home/badri/anaconda3/lib/python3.6/json/encoder.py\", line 428, in _iterencode\n# yield from _iterencode_list(o, _current_indent_level)\n# File \"/home/badri/anaconda3/lib/python3.6/json/encoder.py\", line 325, in _iterencode_list\n# yield from chunks\n# File \"/home/badri/anaconda3/lib/python3.6/json/encoder.py\", line 404, in _iterencode_dict\n# yield from chunks\n# File \"/home/badri/anaconda3/lib/python3.6/json/encoder.py\", line 404, in _iterencode_dict\n# yield from chunks\n# File \"/home/badri/anaconda3/lib/python3.6/json/encoder.py\", line 437, in _iterencode\n# o = _default(o)\n# File \"/home/badri/anaconda3/lib/python3.6/json/encoder.py\", line 180, in default\n# o.__class__.__name__)\n# TypeError: Object of type 'Tensor' is not JSON serializable\n\n","sub_path":"visual_humor_detection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":17551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"404117818","text":"import pandas as pd\nimport datetime\nimport os\n\ncsv_name = os.popen(\"ls ./*.csv\").read().strip()\n\ndata = pd.read_csv(csv_name)\nlist_data = data.values.tolist()\njday_list = []\n\nfor i in range(len(list_data)):\n s = list_data[i][1].split()[0]\n jday = datetime.datetime.strptime(s,\"%Y-%m-%d\").timetuple().tm_yday\n jday_list.append(float(jday))\n\n\nten_day_num = []\ntarget_filename = \"fre_10_day_2010.txt\"\ntarget = open(target_filename,'w')\n\nfor i in range(0,37):\n time_start = i*10+1\n time_end = (i+1)*10+1\n count = 0\n for j in range(len(jday_list)):\n if time_start <= jday_list[j] < time_end:\n count = count+1\n str1 = str(i+1)+' '+str(count)+'\\n'\n target.writelines(str1)\n\ntarget.close()\n\n","sub_path":"2010/get_10_day_num.py","file_name":"get_10_day_num.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"532537852","text":"import pandas \n\ndef split_dataframe(df):\n X = df.drop(['patient num', 'instance num',\n 'instance code', 'class'], axis=1, errors='ignore')\n columns = X.columns\n y = df['class'].values\n groups = df['patient num'].values\n instance_nums = df['instance num'].values if 'instance num' in df else []\n return (X.values, y, groups, instance_nums, columns)\n","sub_path":"pipeline/dataset_functions.py","file_name":"dataset_functions.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"17374647","text":"'''\nImplement a puzzle data structure for the 8-puzzle.\n\nYour Puzzle class must define initial_state, goal_state, and next_states(s) as\nspecified in the problem statement.\n\n__init__(self, starting_state): initializes the Puzzle with the given argument\nas the initial state; must initialize initial_state and goal_state\nARGS:\n starting_state - the starting state of the puzzle\n\nnext_state(self, s): given a state s, returns a list of all the states\nreachable from s with one slide\nARGS:\n s - the input state s\nRETURN:\n a list of states that are reachable from s with one slide; must return\n in a 
consistent order\n'''\nfrom collections import deque\n\nclass Puzzle:\n \n def __init__(self, starting_state):\n self.initial_state= starting_state\n self.goal_state = (1, 2, 3, 4, 5, 6, 7, 8, 0)\n\n def next_states(self, s):\n index = 0\n for i in s:\n if i==0:\n i_0= index\n index+=1\n if i_0 == 0:\n return [(s[1],)+(0,)+(s[2:]), (s[3],)+(s[1:3])+(s[0],)+(s[4:])]\n if i_0 == 1:\n return [(0,)+(s[0],)+(s[2:]), (s[0],)+(s[2],)+(0,)+(s[3:]),(s[0],)+(s[4],)+(s[2:4])+(0,)+(s[5:])]\n if i_0 == 2:\n return [(s[0],)+(s[2],)+(s[1],)+(s[3:]), (s[0:2])+(s[5],)+(s[3:5])+(0,)+(s[6:])]\n if i_0 == 3:\n return [(0,)+(s[1:3])+(s[0],)+(s[4:]), (s[0:3])+(s[4],)+(0,)+(s[5:]),(s[0:3])+(s[6],)+(s[4:6])+(0,)+(s[7:])]\n if i_0 == 4:\n return [(s[0],)+(0,)+(s[2:4])+(s[1],)+(s[5:]), (s[0:3])+(0,)+(s[3],)+(s[5:]), (s[0:4])+(s[5],)+(0,)+(s[6:]),(s[0:4])+(s[7],)+(s[5:7])+(0,)+(s[8],)]\n if i_0 == 5:\n return [(s[0:4])+(0,)+(s[4],)+(s[6:]), (s[0:2])+(0,)+(s[3:5])+(s[2],)+(s[6:]), (s[0:5])+(s[8],)+(s[6:8])+(0,)]\n if i_0 == 6:\n return [(s[0:3])+(0,)+(s[4:6])+(s[3],)+(s[7:]), (s[0:6])+(s[7],)+(0,)+(s[8],)]\n if i_0 == 7:\n return [(s[0:4])+(0,)+(s[5:7])+(s[4],)+(s[8],), (s[0:6])+(0,)+(s[6],)+(s[8],), (s[0:7])+(s[8],)+(0,)]\n if i_0 == 8:\n return [(s[0:5])+(0,)+(s[6:8])+(s[5],), (s[0:7])+(0,)+(s[7],)]\n\n\n\n\n\n'''\nsolve_puzzle(P): given an 8-puzzle data structure, returns the shortest sequence\nof states that can be used to solve the puzzle\nARGS:\n P - the 8-puzzle, with initial_state, goal_state, and next_states(s) defined\nRETURN:\n the sequence of states used to solve the puzzle in the fewest moves (as a\n list); if there are no possible solutions, return the empty list, and if the\n starting state equals the ending state, return a list containing only the\n ending state\n'''\ndef solve_puzzle(P):\n path_list = deque() \n level = {P.initial_state:{}}\n parent = {P.initial_state:None}\n i =1\n frontier= [P.initial_state]\n if frontier == [P.goal_state]:\n return frontier\n while frontier:\n next_ = []\n for u in frontier:\n for v in P.next_states(u):\n if v not in level:\n level[v] = i\n parent[v] = u\n next_.append (v)\n if v == P.goal_state:\n while v!= None:\n path_list.appendleft(v)\n v = parent.get(v)\n return list(path_list)\n \n frontier = next_\n i+=1\n \n return []\n\n\n\"\"\" \n# Part (a)\ns = (1, 2, 3, 4, 0, 6, 7, 5, 8)\nP = Puzzle(s)\nprint P.initial_state\n# (1, 2, 3, 4, 0, 6, 7, 5, 8)\nprint P.goal_state\n# (1, 2, 3, 4, 5, 6, 7, 8, 0)\nnext = P.next_states(s)\nfor state in next:\n print state\n# (1, 0, 3, 4, 2, 6, 7, 5, 8)\n# (1, 2, 3, 0, 4, 6, 7, 5, 8)\n# (1, 2, 3, 4, 6, 0, 7, 5, 8)\n# (1, 2, 3, 4, 5, 6, 7, 0, 8)\n\n# Part (b)\nsolution = solve_puzzle(P)\nfor state in solution:\n print state\n# (1, 2, 3, 4, 0, 6, 7, 5, 8)\n# (1, 2, 3, 4, 5, 6, 7, 0, 8)\n# (1, 2, 3, 4, 5, 6, 7, 8, 0)\n\"\"\"\n","sub_path":"amruss_mit_edu_code_template (2).py","file_name":"amruss_mit_edu_code_template (2).py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"259777305","text":"\"\"\"\nThis is the logging module\n\"\"\"\nimport logging\nimport sys\n\n__author__ = \"Erik Stigum\"\n__copyright__ = \"Copyright 2015, Swiss Tournament\"\n__email__ = \"estigum@gmail.com\"\n__version__ = \"1.0\"\n\ndef set_logger_mode(logger, mode):\n \"\"\"\n This will set the logging mode\n for the logger\n :param logger:\n :param mode:\n :return:\n \"\"\"\n if mode == \"DEBUG\":\n logger.setLevel(logging.DEBUG)\n elif mode == \"INFO\":\n 
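The branch-per-position enumeration in next_states above is easy to mistype; an equivalent table-driven version (a sketch, not part of the original submission) derives the neighbours from the blank's row and column. Probing up, left, right, then down yields the same ordering as the branches above, e.g. for the Part (a) sample in the commented-out demo at the bottom of the file:

def next_states_compact(s):
    # Locate the blank (0) in the flat 9-tuple board.
    i = s.index(0)
    row, col = divmod(i, 3)
    states = []
    # Fixed probe order (up, left, right, down) keeps the result
    # order deterministic, as the contract above requires.
    for dr, dc in ((-1, 0), (0, -1), (0, 1), (1, 0)):
        r, c = row + dr, col + dc
        if 0 <= r < 3 and 0 <= c < 3:
            j = r * 3 + c
            board = list(s)
            board[i], board[j] = board[j], board[i]
            states.append(tuple(board))
    return states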
logger.setLevel(logging.INFO)\n elif mode == \"WARNING\":\n logger.setLevel(logging.WARNING)\n elif mode == \"ERROR\":\n logger.setLevel(logging.ERROR)\n else:\n logger.setLevel(logging.INFO)\n\ndef get_logger():\n \"\"\"\n This will get a new logger\n :return logger:\n \"\"\"\n logger = logging.getLogger()\n\n handler = logging.StreamHandler(stream=sys.stdout)\n formatter = logging.Formatter(\"%(asctime)s %(name)-5s\"\n \" %(levelname)-5s %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger\n","sub_path":"vagrant/tournament/swisslogger.py","file_name":"swisslogger.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"446580340","text":"from itertools import groupby, tee\n\nalunos = [\n {'nome': 'Luiz', 'nota': 'A'},\n {'nome': 'Letícia', 'nota': 'B'},\n {'nome': 'Fabrício', 'nota': 'A'},\n {'nome': 'Rosemary', 'nota': 'C'},\n {'nome': 'Joana', 'nota': 'D'},\n {'nome': 'João', 'nota': 'A'},\n {'nome': 'Eduardo', 'nota': 'B'},\n {'nome': 'André', 'nota': 'C'},\n {'nome': 'Anderson', 'nota': 'B'},\n]\n\n\ndef ordena(item):\n return item['nota']\n\n\n# Usando função lambda\n# ordena = lambda item: item['nota']\n# alunos.sort(key=ordena)\nalunos_agrupados = groupby(alunos, ordena)\n\n'''\n# Sem tee (com list)\nfor agrupamento, valores_agrupados in alunos_agrupados:\n valores = list(valores_agrupados)\n print(f'Agrupamento: {agrupamento}')\n for aluno in valores:\n print(f'\\t{aluno}')\n quantidade = len(valores)\n print(f'\\t{quantidade} alunos tiraram nota {agrupamento}')\n'''\n\n# Com tee\nfor agrupamento, valores_agrupados in alunos_agrupados:\n v1, v2 = tee(valores_agrupados)\n\n print(f'Agrupamento: {agrupamento}')\n\n for aluno in v1:\n print(f'\\t{aluno}')\n\n quantidade = len(list(v2))\n print(f'\\t{quantidade} alunos tiraram nota {agrupamento}')\n","sub_path":"Curso_de_Python_3_do Basico_ao_Avancado_com_projetos_reais/Aulas/Aula79a_groupby_exemplo.py","file_name":"Aula79a_groupby_exemplo.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548930695","text":"##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2012 OpenERP SA ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
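A subtlety in the groupby example above: itertools.groupby only merges consecutive items with equal keys, so with the alunos.sort(key=ordena) call left commented out, students who share a grade but are not adjacent in the list come back as separate groups. A minimal illustration:

from itertools import groupby

grades = ['A', 'B', 'A']
print([k for k, _ in groupby(grades)])          # ['A', 'B', 'A']  (the two As stay split)
print([k for k, _ in groupby(sorted(grades))])  # ['A', 'B']  (sorted input merges them)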
If not, see .\n#\n##############################################################################\nimport logging\n\nfrom openerp import SUPERUSER_ID\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\n\nimport urllib\nimport urllib2\nimport json\nimport re\n\n_logger = logging.getLogger(__name__)\n\n\nclass config(osv.osv):\n _name = 'google.drive.config'\n _description = \"Google Drive templates config\"\n\n def get_google_drive_url(self, cr, uid, config_id, res_id, template_id, context=None):\n config = self.browse(cr, SUPERUSER_ID, config_id, context=context)\n model = config.model_id\n filter_name = config.filter_id and config.filter_id.name or False\n record = self.pool.get(model.model).read(cr, uid, res_id, [], context=context)\n record.update({'model': model.name, 'filter': filter_name})\n name_gdocs = config.name_template\n try:\n name_gdocs = name_gdocs % record\n except:\n raise osv.except_osv(_('Key Error!'), _(\"At least one key cannot be found in your Google Drive name pattern\"))\n\n attach_pool = self.pool.get(\"ir.attachment\")\n attach_ids = attach_pool.search(cr, uid, [('res_model', '=', model.model), ('name', '=', name_gdocs), ('res_id', '=', res_id)])\n url = False\n if attach_ids:\n attachment = attach_pool.browse(cr, uid, attach_ids[0], context)\n url = attachment.url\n else:\n url = self.copy_doc(cr, uid, res_id, template_id, name_gdocs, model.model, context).get('url')\n return url\n\n def get_access_token(self, cr, uid, scope=None, context=None):\n ir_config = self.pool['ir.config_parameter']\n google_drive_refresh_token = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_refresh_token')\n group_config = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'group_erp_manager')[1]\n user = self.pool['res.users'].read(cr, uid, uid, \"groups_id\")\n if not google_drive_refresh_token:\n if group_config in user['groups_id']:\n raise self.pool.get('res.config.settings').get_config_warning(cr, _(\"You haven't configured 'Authorization Code' generated from google, Please generate and configure it in %(menu:base_setup.menu_general_configuration)s.\"), context=context)\n else:\n raise osv.except_osv(_('Error!'), _(\"Google Drive is not yet configured. Please contact your administrator.\"))\n google_drive_client_id = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_id')\n google_drive_client_secret = ir_config.get_param(cr, SUPERUSER_ID, 'google_drive_client_secret')\n #For Getting New Access Token With help of old Refresh Token\n\n data = urllib.urlencode(dict(client_id=google_drive_client_id,\n refresh_token=google_drive_refresh_token,\n client_secret=google_drive_client_secret,\n grant_type=\"refresh_token\",\n scope=scope or 'https://www.googleapis.com/auth/drive'))\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept-Encoding\": \"gzip, deflate\"}\n try:\n req = urllib2.Request('https://accounts.google.com/o/oauth2/token', data, headers)\n content = urllib2.urlopen(req).read()\n except urllib2.HTTPError:\n if group_config in user['groups_id']:\n raise self.pool.get('res.config.settings').get_config_warning(cr, _(\"Something went wrong during the token generation. Please request again an authorization code in %(menu:base_setup.menu_general_configuration)s.\"), context=context)\n else:\n raise osv.except_osv(_('Error!'), _(\"Google Drive is not yet configured. 
Please contact your administrator.\"))\n content = json.loads(content)\n return content.get('access_token')\n\n def copy_doc(self, cr, uid, res_id, template_id, name_gdocs, res_model, context=None):\n ir_config = self.pool['ir.config_parameter']\n google_web_base_url = ir_config.get_param(cr, SUPERUSER_ID, 'web.base.url')\n access_token = self.get_access_token(cr, uid, context=context)\n # Copy template in to drive with help of new access token\n request_url = \"https://www.googleapis.com/drive/v2/files/%s?fields=parents/id&access_token=%s\" % (template_id, access_token)\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept-Encoding\": \"gzip, deflate\"}\n try:\n req = urllib2.Request(request_url, None, headers)\n parents = urllib2.urlopen(req).read()\n except urllib2.HTTPError:\n raise self.pool.get('res.config.settings').get_config_warning(cr, _(\"The Google Template cannot be found. Maybe it has been deleted.\"), context=context)\n parents_dict = json.loads(parents)\n\n record_url = \"Click on link to open Record in OpenERP\\n %s/?db=%s#id=%s&model=%s\" % (google_web_base_url, cr.dbname, res_id, res_model)\n data = {\"title\": name_gdocs, \"description\": record_url, \"parents\": parents_dict['parents']}\n request_url = \"https://www.googleapis.com/drive/v2/files/%s/copy?access_token=%s\" % (template_id, access_token)\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n data_json = json.dumps(data)\n # resp, content = Http().request(request_url, \"POST\", data_json, headers)\n req = urllib2.Request(request_url, data_json, headers)\n content = urllib2.urlopen(req).read()\n content = json.loads(content)\n res = {}\n if content.get('alternateLink'):\n attach_pool = self.pool.get(\"ir.attachment\")\n attach_vals = {'res_model': res_model, 'name': name_gdocs, 'res_id': res_id, 'type': 'url', 'url': content['alternateLink']}\n res['id'] = attach_pool.create(cr, uid, attach_vals)\n res['url'] = content['alternateLink']\n return res\n\n def get_google_drive_config(self, cr, uid, res_model, res_id, context=None):\n '''\n Function called by the js, when no google doc are yet associated with a record, with the aim to create one. It\n will first seek for a google.docs.config associated with the model `res_model` to find out what's the template\n of google doc to copy (this is usefull if you want to start with a non-empty document, a type or a name\n different than the default values). If no config is associated with the `res_model`, then a blank text document\n with a default name is created.\n :param res_model: the object for which the google doc is created\n :param ids: the list of ids of the objects for which the google doc is created. 
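For reference, the refresh-token exchange in get_access_token above can be written far more compactly with the requests library. This is only a sketch, not part of the OpenERP module; it reuses the same endpoint and form fields as the urllib2 code:

import requests

def refresh_access_token(client_id, client_secret, refresh_token,
                         scope='https://www.googleapis.com/auth/drive'):
    resp = requests.post(
        'https://accounts.google.com/o/oauth2/token',
        data={
            'client_id': client_id,
            'client_secret': client_secret,
            'refresh_token': refresh_token,
            'grant_type': 'refresh_token',
            'scope': scope,
        },
        timeout=10,
    )
    # Raises for 4xx/5xx responses instead of silently returning garbage.
    resp.raise_for_status()
    return resp.json()['access_token']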
This list is supposed to have\n a length of 1 element only (batch processing is not supported in the code, though nothing really prevent it)\n :return: the config id and config name\n '''\n if not res_id:\n raise osv.except_osv(_('Google Drive Error!'), _(\"Creating google drive may only be done by one at a time.\"))\n # check if a model is configured with a template\n config_ids = self.search(cr, uid, [('model_id', '=', res_model)], context=context)\n configs = []\n for config in self.browse(cr, uid, config_ids, context=context):\n if config.filter_id:\n if (config.filter_id.user_id and config.filter_id.user_id.id != uid):\n #Private\n continue\n domain = [('id', 'in', [res_id])] + eval(config.filter_id.domain)\n local_context = context and context.copy() or {}\n local_context.update(eval(config.filter_id.context))\n google_doc_configs = self.pool.get(config.filter_id.model_id).search(cr, uid, domain, context=local_context)\n if google_doc_configs:\n configs.append({'id': config.id, 'name': config.name})\n else:\n configs.append({'id': config.id, 'name': config.name})\n return configs\n\n def _resource_get(self, cr, uid, ids, name, arg, context=None):\n result = {}\n for data in self.browse(cr, uid, ids, context):\n mo = re.search(\"(key=|/d/)([A-Za-z0-9-_]+)\", data.google_drive_template_url)\n if mo:\n result[data.id] = mo.group(2)\n else:\n raise osv.except_osv(_('Incorrect URL!'), _(\"Please enter a valid Google Document URL.\"))\n return result\n\n def _client_id_get(self, cr, uid, ids, name, arg, context=None):\n result = {}\n client_id = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'google_drive_client_id')\n for config_id in ids:\n result[config_id] = client_id\n return result\n\n _columns = {\n 'name': fields.char('Template Name', required=True, size=1024),\n 'model_id': fields.many2one('ir.model', 'Model', ondelete='set null', required=True),\n 'model': fields.related('model_id', 'model', type='char', string='Model', readonly=True),\n 'filter_id': fields.many2one('ir.filters', 'Filter', domain=\"[('model_id', '=', model)]\"),\n 'google_drive_template_url': fields.char('Template URL', required=True, size=1024),\n 'google_drive_resource_id': fields.function(_resource_get, type=\"char\", string='Resource Id'),\n 'google_drive_client_id': fields.function(_client_id_get, type=\"char\", string='Google Client '),\n 'name_template': fields.char('Google Drive Name Pattern', size=64, help='Choose how the new google drive will be named, on google side. Eg. 
gdoc_%(field_name)s', required=True),\n 'active': fields.boolean('Active'),\n }\n\n def onchange_model_id(self, cr, uid, ids, model_id, context=None):\n res = {}\n if model_id:\n model = self.pool['ir.model'].browse(cr, uid, model_id, context=context)\n res['value'] = {'model': model.model}\n else:\n res['value'] = {'filter_id': False, 'model': False}\n return res\n\n _defaults = {\n 'name_template': 'Document %(name)s',\n 'active': True,\n }\n\n def _check_model_id(self, cr, uid, ids, context=None):\n config_id = self.browse(cr, uid, ids[0], context=context)\n if config_id.filter_id and config_id.model_id.model != config_id.filter_id.model_id:\n return False\n return True\n\n _constraints = [\n (_check_model_id, 'Model of selected filter is not matching with model of current template.', ['model_id', 'filter_id']),\n ]\n\n def get_google_scope(self):\n return 'https://www.googleapis.com/auth/drive'\n\nconfig()\n\n\nclass base_config_settings(osv.osv):\n _inherit = \"base.config.settings\"\n\n _columns = {\n 'google_drive_authorization_code': fields.char('Authorization Code', size=124),\n 'google_drive_uri': fields.char('URI', readonly=True, help=\"The URL to generate the authorization code from Google\"),\n }\n _defaults = {\n 'google_drive_uri': lambda s, cr, uid, c: s.pool['google.service']._get_google_token_uri(cr, uid, 'drive', scope=s.pool['google.drive.config'].get_google_scope(), context=c),\n }\n\n def set_google_authorization_code(self, cr, uid, ids, context=None):\n config = self.browse(cr, uid, ids[0], context)\n refresh_token = self.pool['google.service'].generate_refresh_token(cr, uid, 'drive', config.google_drive_authorization_code, context=context)\n self.pool['ir.config_parameter'].set_param(cr, uid, 'google_drive_refresh_token', refresh_token)\n","sub_path":"google_drive/google_drive.py","file_name":"google_drive.py","file_ext":"py","file_size_in_byte":12522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"165641019","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\nfrom setuptools.command.install import install as _install\n\nclass install(_install):\n def pre_install_script(self):\n pass\n\n def post_install_script(self):\n pass\n\n def run(self):\n self.pre_install_script()\n\n _install.run(self)\n\n self.post_install_script()\n\nif __name__ == '__main__':\n setup(\n name = 'jenkins-scrapper',\n version = '0.1.14',\n description = '',\n long_description = '',\n long_description_content_type = None,\n classifiers = [\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python'\n ],\n keywords = '',\n\n author = '',\n author_email = '',\n maintainer = '',\n maintainer_email = '',\n\n license = '',\n\n url = '',\n project_urls = {},\n\n scripts = [],\n packages = [],\n namespace_packages = [],\n py_modules = [],\n entry_points = {},\n data_files = [],\n package_data = {},\n install_requires = [\n 'astroid==2.5.6',\n 'autopep8==1.5.7',\n 'beautifulsoup4==4.9.3',\n 'bs4==0.0.1',\n 'certifi==2020.12.5',\n 'chardet==4.0.0',\n 'idna==2.10',\n 'isort==5.8.0',\n 'lazy-object-proxy==1.6.0',\n 'lxml==4.6.3',\n 'mccabe==0.6.1',\n 'pycodestyle==2.7.0',\n 'pylint==2.8.2',\n 'requests==2.25.1',\n 'rope==0.19.0',\n 'soupsieve==2.2.1',\n 'toml==0.10.2',\n 'typed-ast==1.4.3',\n 'urllib3==1.26.4',\n 'wrapt==1.12.1'\n ],\n dependency_links = [],\n zip_safe = True,\n cmdclass = {'install': install},\n python_requires = '',\n obsoletes = [],\n 
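Back in the google_drive module, the _resource_get pattern accepts both the legacy key= query form and the newer /d/ path form of a Google Document URL, with group(2) capturing the resource id. A quick check against two made-up URLs:

import re

pattern = r"(key=|/d/)([A-Za-z0-9-_]+)"
for url in ("https://docs.google.com/document/d/1aBcD_ef-123/edit",
            "https://docs.google.com/Doc?key=0AbCdEfG"):
    mo = re.search(pattern, url)
    print(mo.group(2) if mo else "no match")
# prints 1aBcD_ef-123, then 0AbCdEfG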
)\n","sub_path":"target/dist/jenkins-scrapper-1.0.dev0/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220109752","text":"from configparser import ConfigParser\nimport subprocess\nfrom logger import setup_custom_logger\n\n# Create instance of configParser\nconfig = ConfigParser()\n\n# Create logger instance\n\nlogger = setup_custom_logger(\"MAIN_THREAD\")\n# Import ini file\nconfig.read(\"config.ini\")\n\nbat = config.get(\"main\", \"bat_path\") + \"/run_all_scripts.bat\"\n\n\n# Create a subprocess based on the file in the filepath\nrun_all_scripts = subprocess.Popen(bat)\n\n\nstdout, stderr = run_all_scripts.communicate()\n\nlogger.info(\"---------------------------------------------------\")\nlogger.info(\"Finished\")\n\n# Return status code (if 0 its good)\nprint(run_all_scripts.returncode)","sub_path":"sse-aai-zsa/Zsa/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274099361","text":"from accounts.mixins import LoginRequiredMixin, AdminRequiredMixin\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import redirect, get_object_or_404\nfrom django.views.generic import TemplateView\nfrom documents.defs import get_mimes_for_category\nfrom documents.forms import DocumentForm\nfrom documents.models import Document\nfrom accounts.models import Person\nfrom .models import Project\nfrom .forms import CategoryForm, ProjectForm, SortForm, permission_forms, CompanyForm, DeleteProjectsForm\n\n\nclass ProjectsView(LoginRequiredMixin, TemplateView):\n template_name = 'projects.html'\n\n def get_context_data(self, **kwargs):\n context = {\n 'companies': self.request.user.get_companies_list(),\n 'form': self.form,\n }\n if self.request.user.is_admin:\n context['deleted_form'] = self.deleted_form\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.form = ProjectForm(request.POST or None, company=request.user.company,\n is_admin=self.request.user.is_admin)\n if request.user.is_admin:\n self.deleted_form = DeleteProjectsForm(request.POST or None)\n\n return super(ProjectsView, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not request.user.is_admin:\n messages.error(request, 'You do not have permission to create a new project')\n return redirect(reverse('projects'))\n\n if 'create' in request.POST and self.form.is_valid():\n project = self.form.save()\n # Add everyone in the organization to the project.\n #[project.clients.add(person) for person in project.company.get_people_list()]\n messages.success(request, 'The project has been created')\n return redirect(reverse('project', args=[project.pk]))\n\n if 'undelete' in request.POST and self.deleted_form.is_valid():\n project = self.deleted_form.save()\n messages.success(request, 'The project %s has been un-deleted' % project.name)\n return redirect(reverse('projects'))\n\n return self.render_to_response(self.get_context_data())\n\n\nclass ProjectView(LoginRequiredMixin, TemplateView):\n template_name = 'project.html'\n\n def get_context_data(self, **kwargs):\n documents = Document.objects.active().filter(project=self.project)\n if hasattr(self, 'sort_order'):\n if self.sort_order == 'AZ':\n documents = documents.order_by('name')\n if hasattr(self, 'category'):\n mimes = 
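On the run.py launcher above: because the Popen call opens no pipes, its communicate() returns (None, None), so stdout and stderr there are always empty. When the output matters, subprocess.run is the simpler form. A sketch reusing the bat path built above (capture_output needs Python 3.7+, and the .bat target implies Windows):

import subprocess

# Runs the batch file, waits for completion, and captures its output.
result = subprocess.run(bat, capture_output=True, text=True)
print(result.stdout)
print(result.returncode)  # 0 means success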
get_mimes_for_category(self.category)\n documents = documents.filter(mime__in=mimes)\n return {\n 'categories_form': self.categories_form,\n 'company_form': self.company_form,\n 'documents': documents,\n 'form': self.form,\n 'project': self.project,\n 'sort_form': self.sort_form,\n }\n\n def dispatch(self, request, pk, *args, **kwargs):\n self.project = get_object_or_404(Project.objects.active(), pk=pk)\n person = Person.objects.get(pk=self.request.user.pk)\n self.form = DocumentForm(request.POST or None, request.FILES or None, person=person,\n project=self.project)\n self.categories_form = CategoryForm(request.GET or None, project=self.project)\n self.company_form = CompanyForm(request.POST or None, project=self.project)\n self.sort_form = SortForm(request.GET or None)\n if 'search' in request.GET:\n self.sort_order = request.GET.get('search')\n if 'category' in request.GET and request.GET.get('category'):\n self.category = request.GET.get('category')\n return super(ProjectView, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if 'delete' in request.POST and request.user.is_admin:\n self.project.set_active(False)\n messages.success(request, 'The project has been deleted')\n return redirect(reverse('projects'))\n\n if 'move' in request.POST and request.user.is_admin and self.company_form.is_valid():\n self.company_form.save()\n return redirect(reverse('project', args=[self.project.pk]))\n\n if 'upload' in request.POST and self.form.is_valid():\n self.form.save()\n messages.success(request, 'The document has been created')\n return redirect(reverse('project', args=[self.project.pk]))\n return self.render_to_response(self.get_context_data())\n\n\nclass ProjectPermissions(AdminRequiredMixin, TemplateView):\n template_name = 'permissions.html'\n\n def get_context_data(self, **kwargs):\n return {\n 'forms': self.forms,\n 'project': self.project,\n }\n\n def dispatch(self, request, pk, *args, **kwargs):\n self.project = get_object_or_404(Project.objects.active(), pk=pk)\n self.forms = permission_forms(request, self.project)\n return super(ProjectPermissions, self).dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n all_valid = True\n for form in self.forms:\n if form.is_valid():\n form.save()\n else:\n all_valid = False\n if all_valid:\n messages.success(request, 'The permissions have been updated.')\n return redirect(reverse('permissions', args=[self.project.pk]))\n return self.render_to_response(self.get_context_data())\n","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"517725355","text":"import json\n\nfrom ksm_challenge_src import util\n# from .data import data\n#\n# numerical = data.numerical\n#\n#\n# # 经验获取计算 ———— 利用这个计算某个等级的boss的经验平均应该有多少\n# def exp_earn_calc(lv: int):\n# current = numerical['exp_earn_base']\n# for i in range(lv - 1):\n# current = current * numerical['exp_earn_rate'] + numerical['exp_earn_add']\n# return current\n#\n#\n#\n# def res_print(func):\n# def wrapper(arg):\n# value = func(arg)\n# print(value)\n# return value\n#\n# return wrapper\nfrom ksm_challenge_src.GameChar import numerical\nimport matplotlib.pyplot as plt\n\n\ndef _exp_requirement(lv: int) -> 100:\n return int(\n util.recurrence(numerical['exp_base'], numerical['exp_add_rate'], numerical['exp_add_point'], lv) / 100) * 100\n\n\ndef exp_to_talent_coin(exp):\n return int(i ** 
numerical['talent_coin_earn_index'] * numerical['talent_coin_earn_rate'])\n\nif __name__ == '__main__':\n\n a = []\n for i in range(1, 31):\n a.append(_exp_requirement(i))\n\n\n a = [int(i ** numerical['talent_coin_earn_index'] * numerical['talent_coin_earn_rate']) for i in a]\n\n with open('data/talent.json') as FILE:\n talent = json.load(FILE)\n\n param = talent['talent_coin_earn_rate']\n b = []\n for lvl in range(param['max_level']):\n b.append(1 * util.recurrence(param['cost_base'], param['cost_ratio'], param['cost_grow'], lvl + 1))\n\n\n plt.plot(range(1,31), a)\n plt.plot(range(param['max_level']), b)\n plt.show()","sub_path":"ksm_challenge_src/PlayerNumericalDesign.py","file_name":"PlayerNumericalDesign.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629997296","text":"import calendar\nimport os\nimport bs4.element\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\nimport re\nimport operator\nfrom collections import OrderedDict\nimport json\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nclass Funds:\n datasets = []\n\n def slugify(string):\n return re.sub(r'[-\\s]+', '-', (re.sub(r'[^\\w\\s-]', '', string).strip().lower()))\n\n def __init__(self):\n self._session = requests.session()\n self.set_proxy(proxy=None) # set proxy directly \"http://user:pass@10.10.1.0:1080\"\n self._filepath = str(os.path.dirname(os.path.abspath(__file__))) + '/funds.json'\n self._const = self.init_const()\n self._url = \"https://www.moneycontrol.com/mutual-funds/axis-long-term-equity-fund-direct-plan/portfolio-holdings/\"\n # more items will get added later\n\n def init_const(self):\n with open(self._filepath, 'r') as f:\n return json.load(f)\n\n def set_proxy(self, proxy):\n \"\"\"\n This is optional method to work with proxy server before getting any data.\n :param proxy: provide dictionary for proxies setup as\n proxy = { 'http': 'http://user:pass@10.10.1.0:1080',\n 'https': 'http://user:pass@10.10.1.0:1090'}\n :return: None\n \"\"\"\n proxy_dict = {\n \"http\": proxy,\n \"https\": proxy,\n \"ftp\": proxy\n }\n try:\n result = requests.get(\"http://google.com\", proxies=proxy_dict, timeout=5)\n except requests.exceptions.RequestException as e:\n print(\"Proxy is possibly not needed.\", e)\n proxy_dict = None\n self._session.proxies = proxy_dict\n\n def get_stock_price(self, url):\n price_detail = {}\n url = \"https://www.moneycontrol.com/india/stockpricequote/sugar/balrampurchinimills/BCM\"\n result = self._session.get(url, timeout=30)\n if result.status_code == 200:\n soup1 = BeautifulSoup(result.content, \"lxml\")\n change_text = soup1.find(\"div\", {\"id\": \"nsechange\"}).text\n price_detail[\"change\"] = change_text.split(\" \")[0]\n price_detail[\"change_per\"] = change_text.split(\" \")[1].strip(\"(\").strip(\")\")\n\n # print(\"div: \" + div)\n # print(\"Change: \" + div.split(\" \")[0] + \" Change % : \" + div.split(\" \")[1].strip(\"(\").strip(\")\"))\n # print(\"Price is : \" + soup1.find(\"div\", {\"id\": \"nsecp\"})[\"rel\"] )\n price = soup1.find(\"div\", {\"id\": \"nsecp\"})[\"rel\"]\n price_detail[\"price\"] = price\n return price_detail\n\n def get_tag_parse(self, tag_str):\n soup = BeautifulSoup(tag_str, 'html.parser')\n print(soup.find(\"div\", {\"id\": \"nsechange\"}).text)\n\n def parse_fund(self, fund_key, fund_name):\n url = self._url + fund_key\n print(\"URL of mutual fund\" + fund_name + \" is: \" + url)\n result = 
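One genuine bug in PlayerNumericalDesign.py above: exp_to_talent_coin reads the loop variable i from module scope instead of its own exp argument, so calling it before the __main__ loops run raises a NameError. The intended form appears to be:

def exp_to_talent_coin(exp):
    # Use the argument, not the enclosing script's loop variable.
    return int(exp ** numerical['talent_coin_earn_index']
               * numerical['talent_coin_earn_rate'])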
self._session.get(url, timeout=30)\n if result.status_code == 200:\n soup = BeautifulSoup(result.content, \"lxml\")\n date_on = soup.find(\"span\", attrs={\"class\": \"subtext TT\"}).get_text().strip()\n print(\"Result dated \", date_on)\n current_month = datetime.today().month\n report_month = (datetime.strptime(date_on.strip(\"()\").strip(\"as on\"), '%dst %b,%Y') + relativedelta(months=1)).month\n if current_month == report_month:\n print(\"Perfect! Report is for current month.\", calendar.month_name[report_month])\n else:\n print(\"Report is old for last month.\", calendar.month_name[report_month])\n stock_table = soup.find(\"table\", id=\"equityCompleteHoldingTable\")\n headings = [th.get_text() for th in stock_table.find(\"tr\").find_all(\"th\")]\n headings = [\"id\", \"fund-name\", \"url\"] + headings\n head_slug = [self.slugify(h) for h in headings]\n tbody = stock_table.find(\"tbody\")\n\n for row in tbody.find_all(\"tr\")[1:]:\n temp = [td.get_text().strip() for td in row.find_all('td')]\n url = row.find('a', href=True)['href']\n dataset = dict(zip(head_slug, [fund_key, fund_name, url] + temp))\n self.datasets.append(dataset)\n\n def write_data_to_csv(self):\n keys = list(self.datasets[0].keys())\n with open('data.csv', 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(self.datasets)\n\nif __name__ == \"__main__\":\n funds = Funds()\n # 1. Get every fund and open detail portfolio\n for fund in funds.init_const()['funds']['mid']:\n print(fund['key'], fund['name'])\n funds.parse_fund(fund['key'], fund['name'])\n funds.write_data_to_csv()\n","sub_path":"funds.py","file_name":"funds.py","file_ext":"py","file_size_in_byte":4814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"107196883","text":"\nimport os\nimport shutil\nfrom argparse import Namespace\nfrom pathlib import Path\nfrom importlib.machinery import SourceFileLoader\nfrom datetime import datetime, timedelta\n\nimport yaml\n\nfrom hellflame.services.Service import Service\n\n\nclass TrainService(Service):\n command_name = 'train'\n help_text = '設定ファイルの内容を元に学習を行う'\n\n def __init__(self,subparsers):\n super().__init__(subparsers)\n\n # subparserの登録\n def register_parser(self,parser):\n # 必須項目\n parser.add_argument('config',type=str,\n help='config file path, 設定ファイルへのパス')\n # コマンドライン引数からパスを指定する場合\n parser.add_argument('--name','-n',type=str,\n dest='name', help='experiment name, 実験名')\n parser.add_argument('--programs','-p', type=str,\n default=None,\n dest='prog', help='ソースコードのディレクトリ')\n parser.add_argument('--datasets','-d', type=str,\n default=None,\n dest='data', help='データセット置き場')\n parser.add_argument('--experiments','-e', type=str,\n default=None,\n dest='exp', help='実験データ置き場')\n parser.add_argument('--temporary','-t', type=str,\n default=None,\n dest='tmp', help='一時ファイル置き場')\n # 強制的に最初から\n parser.add_argument('--force-clear', action='store_true',\n dest='clear', help='強制的に最初から')\n # すべて yes オプション\n parser.add_argument('--yes','-y', action='store_true',\n dest='yes', help='ディレクトリの作成などすべて自動でyesを入力')\n # GPU指定オプション\n parser.add_argument('--gpu','-g', type=str,\n default=None,\n dest='gpu', help='GPUを指定する場合')\n\n # エントリーポイント\n def handler_function(self,args):\n print('\\033[36m::: >>> Enter: TrainService\\033[0m')\n\n # 設定ファイルの存在確認と読み込み\n config_path = Path(args.config)\n if not config_path.exists():\n raise Exception('there is not such a config file : '+str(config_path))\n else:\n with 
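Two fragilities in parse_fund's date handling are worth noting: str.strip("as on") strips the characters a, s, o, n and space from the ends of the string (it happens to work here, but it does not remove the phrase), and the '%dst %b,%Y' format only matches day numbers ending in "st" (1st, 21st, 31st); a "2nd June" report would raise. A more tolerant sketch, assuming the page renders dates like "(as on 31st May,2021)":

import re
from datetime import datetime

def parse_report_date(date_on):
    cleaned = date_on.strip("()").replace("as on", "").strip()
    # Drop any ordinal suffix (st/nd/rd/th) before parsing.
    cleaned = re.sub(r"(\d+)(st|nd|rd|th)", r"\1", cleaned)
    return datetime.strptime(cleaned, "%d %b,%Y")

print(parse_report_date("(as on 31st May,2021)"))  # 2021-05-31 00:00:00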
config_path.open('r') as f:\n config = yaml.load(f,Loader=yaml.FullLoader)\n\n # マシンの確認\n if 'machine' in config['environ'] and None!=config['environ']['machine']!=os.uname()[1]:\n raise Exception(' setting name is not this machine : %s'%(config['environ']['machine']))\n machine = os.uname()[1]\n\n # GPU設定の確認\n cuda_string = self.get_device_settings(args,config) # str, list of int\n # cpu\n # 0,1,2\n # None <- 数え上げるのはtorchの仕事. CUDA_VISIBLE_DEVICESの設定がされていないので��べて扱えるはず\n\n # 実験名の取得\n if ('exp_name' not in config['environ'] or config['environ']['exp_name']==None) and args.name==None:\n raise Exception(' set experiment name : --name hoge/fuga/piyo')\n exp_name = args.name if args.name!=None else config['environ']['exp_name']\n\n # パスの設定読み込み\n paths = self.get_paths(args, config) # paths : Namespace, all attr is Path\n # 実験保存ディレクトリの作成\n paths['savedir'] = paths['exp'] / exp_name\n if args.clear: # 強制新規作成のときは\n shutil.rmtree(paths['savedir'],ignore_errors=True)\n paths['savedir'].mkdir(parents=True,exist_ok=True)\n\n # 実験ディレクトリの中身を見てcontinue確認する\n continue_flag = self.continue_check(paths['savedir'])\n\n # 設定ファルに読み込んだ内容を追加\n config['env'] = {\n 'prog' : paths['prog'], # プログラムのルートディレクトリ\n 'data' : paths['data'], # データセットのルートディレクトリ\n 'tmp' : paths['tmp'], # 計算キャッシュやglobal_writerに使う\n 'savedir' : paths['savedir'], # この実験の保存ディレクトリ\n 'is_continue': continue_flag, # 続きからかどうか\n 'exp_name': exp_name, # 実験の名前\n 'machine': machine, # マシン名\n 'cuda_string': cuda_string, # CUDA_VISIBLE_DEVICESに設定された文字列\n 'log': {\n 'exp' : paths['exp'],\n 'config': config_path, # 設定ファイルのパス\n }\n }\n\n # 設定ファイルの保存\n timestamp = (datetime.now()+timedelta(milliseconds=1)).strftime('%Y%m%d_%H%M_%S.%f')[:-3]\n shutil.copy(config_path, paths['savedir']/('hellflame_raw_config_%s.yml'%(timestamp)) ) # 生のやつの保存\n with (paths['savedir']/('hellflame_config_%s.yml'%(timestamp))).open('w') as f: # 読み取り結果の保存\n yaml.dump(config,f)\n\n # Trainerの呼び出し\n Trainer = getattr( SourceFileLoader( '_hellflame_trainer',\n str(config['env']['prog']/'trainer'/(config['trainer']['name']+'.py'))\n ).load_module(), 'Trainer')\n print('\\033[36m>>> ================ environment construction ================= <<<\\033[0m')\n trainer = Trainer(config)\n print('\\033[36m>>> ======================= train start ======================= <<<\\033[0m')\n try:\n trainer.train()\n # 終了したことを明示\n (paths['savedir']/'hellflame_end_point').touch()\n print('\\033[36m>>> ======================== train end ======================== <<<\\033[0m')\n del trainer\n except KeyboardInterrupt:\n print('\\n\\033[36m>>> ====================== catch Ctrl-C ======================= <<<\\033[0m')\n del trainer\n print('\\033[36m::: <<< Exit: TrainService\\033[0m')\n return 1\n\n\n def get_paths(self,args,config):\n \"\"\"パスの設定\n\n 優先度\n current path < environment variable < config file < command line input\n \"\"\"\n\n # 初期化\n path_names = ['prog','data','exp','tmp']\n paths = {k:{'path':None,'src':None} for k in path_names} # パスと情報ソース\n\n # プログラムだけ初期設定はカレントディレクトリ\n paths['prog'] = {'path':Path(os.getcwd()),'src':'current path'}\n\n # 環境変数の読み込み\n env_names = [ 'ML'+name.upper() for name in path_names]\n paths = { p:{'path':Path(os.environ.get(e)),'src':'environment variable'}\n if os.environ.get(e) is not None else paths[p] for p, e in zip(path_names,env_names) }\n\n # 設定ファイルからパスの読み込み\n if 'path' in config['environ']:\n paths = { p:{'path':Path(config['environ']['path'][e]),'src':'config file'}\n if e in config['environ']['path'] else paths[p] for p, e in zip(path_names,env_names) }\n\n # 
コマンドライン引数からの読み込み\n paths = { p:{'path':Path(vars(args)[p]),'src':'commandline args'}\n if vars(args)[p] is not None else paths[p] for p, e in zip(path_names,env_names) }\n\n # 存在確認\n for pname, ename in zip(path_names,env_names):\n # パスが設定されていないとき\n if paths[pname]['path'] is None:\n raise Exception('set path : '+pname+' ($'+ename+')')\n # 設定されたパスが存在しないとき\n if not paths[pname]['path'].exists():\n if pname in ['exp','tmp']: # 書き込み系ディレクトリはディレクトリを作っていいか聞く\n if args.yes or input('the path is not exist : %s\\n'\n ' read from %s\\n'\n 'Do you make the path (y/n)? >> '%(\n str(paths[pname]['path']),paths[pname]['src'])) == 'y':\n paths[pname]['path'].mkdir(parents=True)\n else:\n raise Exception(' set accurate path : %s ($%s)'%(pname,ename))\n else:\n raise Exception('the path is not exist : '+str(paths[pname]['path'])+\\\n '\\n read from '+paths[pname]['src']+\\\n '\\n set accurate path : '+pname+' ($'+ename+')')\n\n result_paths = {k:v['path'] for k,v in paths.items()}\n return result_paths\n\n\n def continue_check(self,save_path):\n '''\n 実験が続きからかどうか見る\n '''\n start_file = save_path/'hellflame_start_point'\n end_file = save_path/'hellflame_end_point'\n\n # 既に終了しているとき\n if end_file.exists():\n raise Exception('this experiments has been ended.')\n\n # スタートファイルがあったら continue = True\n if start_file.exists():\n return True\n # なかったら作る\n else:\n start_file.touch()\n return False\n\n\n def get_device_settings(self,args,config):\n # GPU 優先度 CUDA_VISIBLE_DEVICES > argparse > config\n\n # pci順で考える\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n\n ## CUDA_VISIBLE_DEVICES あり\n if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:\n cuda_string = os.environ.get('CUDA_VISIBLE_DEVICES')\n return cuda_string\n\n # hellflame コマンドライン指定あり\n if args.gpu is not None:\n if args.gpu == 'cpu':\n cuda_string = os.environ['CUDA_VISIBLE_DEVICES'] = 'cpu'\n elif args.gpu == 'all':\n cuda_string = None\n else:\n cuda_string = os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n return cuda_string\n\n # 設定ファイルに指定があるとき\n if 'gpu' in config['environ']:\n if config['environ']['gpu'] == 'cpu':\n cuda_string = os.environ['CUDA_VISIBLE_DEVICES'] = 'cpu'\n elif config['environ']['gpu'] == 'all':\n cuda_string = None\n else:\n cuda_string = os.environ['CUDA_VISIBLE_DEVICES'] = \\\n ''.join([str(i) for i in config['environ']['gpu']])\n return cuda_string\n\n cuda_string = None\n return cuda_string\n","sub_path":"hellflame/services/TrainService.py","file_name":"TrainService.py","file_ext":"py","file_size_in_byte":10886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"634206815","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 3 20:32:20 2018\n\n@author: jacobschroeder\n\"\"\"\n\nimport pandas as pd\n\n\n\ndef ascii_parser(text_file):\n \"\"\" Given a raw ASCII file, parses and returns a pandas DataFrame \"\"\"\n raw_file = open(text_file, 'r')\n\n parse_dict = {\n 'Year' : [],\n 'State' : [],\n # 'Race' : [],\n # 'Origin' : [],\n # 'Sex' : [],\n 'Age' : [],\n 'Population' : []\n }\n \n for line in raw_file:\n parse_dict['Year'].append(line[:4])\n parse_dict['State'].append(line[4:6])\n # parse_dict['Race'].append(line[13])\n # parse_dict['Origin'].append(line[14])\n # parse_dict['Sex'].append(line[15])\n parse_dict['Age'].append(line[16:18])\n parse_dict['Population'].append(line[18:25])\n \n df = pd.DataFrame(parse_dict)\n \n df['Age'] = df['Age'].astype('int')\n df['Population'] = df['Population'].astype('int')\n \n df_summed = 
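Since us.1990_2016.19ages.txt is a fixed-width file, the manual slicing loop in ascii_parser can also be expressed with pandas.read_fwf, using the same column offsets (year 0:4, state 4:6, age 16:18, population 18:25) and skipping the race/origin/sex fields just as the parser above does. A sketch, reusing the text_file path defined below the function:

import pandas as pd

colspecs = [(0, 4), (4, 6), (16, 18), (18, 25)]
names = ['Year', 'State', 'Age', 'Population']

# read_fwf slices each record at the given offsets, mirroring line[0:4] etc.
df = pd.read_fwf(text_file, colspecs=colspecs, names=names,
                 dtype={'Year': str, 'State': str})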
df.groupby(['Year','State','Age']).sum()\n \n df_final = pd.pivot_table(df_summed, index=['Year', 'State'], columns = 'Age', values= 'Population')\n \n return df_final\n\ntext_file = '/Users/jacobschroeder/anaconda3/projects/us.1990_2016.19ages.txt'\n\ndf = ascii_parser(text_file)\n\ndf.to_csv('/Users/jacobschroeder/anaconda3/projects/full_dataset_bystate.csv')","sub_path":"capstone_project/final/data_wrangling_enhanced.py","file_name":"data_wrangling_enhanced.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"48387777","text":"#!/bin/python3\n\ndef multiply_two(arr, num):\n multiple = []\n for i in arr:\n if num%2 == 0:\n multiple.append(i)\n i=0\n j =len(multiple) - 1\n while i < j and i != len(arr) - 1:\n if multiple[i] * multiple[j] == 20:\n return multiple[i], multiple[j]\n else:\n j -= 1\n if j == i:\n i += 1\n j = len(multiple) - 1\n return -1\n\n\nif __name__=='__main__':\n arr = list(map(int, input().rstrip().split()))\n num = int(input())\n result = multiply_two(arr, num)\n print(result)\n","sub_path":"hackerrank/geeks/multiply_two.py","file_name":"multiply_two.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411999510","text":"import struct\nimport os\n\n\ncurrent_pe = None\n\n\nclass PE:\n \"\"\"Basic PE parsing.\n Ref:\n - https://hshrzd.wordpress.com/pe-bear/\n - https://blog.kowalczyk.info/articles/pefileformat.html\n \"\"\"\n X86_64 = 0x8664\n X86_32 = 0x14c\n ARM = 0x1c0\n ARM64 = 0xaa64\n ARMNT = 0x1c4\n AM33 = 0x1d3\n IA64 = 0x200\n EFI = 0xebc\n MIPS = 0x166\n MIPS16 = 0x266\n MIPSFPU = 0x366\n MIPSFPU16 = 0x466\n WCEMIPSV2 = 0x169\n POWERPC = 0x1f0\n POWERPCFP = 0x1f1\n SH3 = 0x1a2\n SH3DSP = 0x1a3\n SH4 = 0x1a6\n SH5 = 0x1a8\n THUMP = 0x1c2\n RISCV32 = 0x5032\n RISCV64 = 0x5064\n RISCV128 = 0x5128\n M32R = 0x9041\n\n dos_magic = b'MZ'\n ptr_to_pe_header = None\n pe_magic = b'PE'\n machine = X86_32\n num_of_sections = None\n size_of_opt_header = None\n dll_charac = None\n opt_magic = b'\\x02\\x0b'\n entry_point = None\n base_of_code = None\n image_base = None\n\n\n def __init__(self, pe=\"\"):\n if not os.access(pe, os.R_OK):\n err(\"'{0}' not found/readable\".format(pe))\n err(\"Failed to get file debug information, most of gef features will not work\")\n return\n\n with open(pe, \"rb\") as fd:\n # off 0x0\n self.dos_magic = fd.read(2)\n if self.dos_magic != PE.dos_magic:\n self.machine = None\n return\n\n # off 0x3c\n fd.seek(0x3c)\n self.ptr_to_pe_header, = struct.unpack(\"edit' % url\n","sub_path":"proj/custom_profile/templatetags/editing_tags.py","file_name":"editing_tags.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612519839","text":"from adminworkstation.models import Path\nfrom userworkstation.models import UploadFile\n\ndef update_get_old_list(path_id):\n # 获取旧数据的 连接类型 / 交易描述 / 交易类型 / 交易路径 / 备注说明 /hash\n res = Path.objects.filter(path_id=path_id)\n data_source = [res[0].path_linetype,\n res[0].path_content,\n res[0].path_tradetype,\n res[0].path_tradepath,\n res[0].path_remark,\n res[0].path_hash]\n\n # 查找文档ID\n file_list_db = []\n for UploadFiles in UploadFile.objects(file_path_id=path_id):\n file_list_db.append(UploadFiles.file_id)\n\n # 排序\n file_list_db.sort()\n # 加入到总列表中\n data_source.append(file_list_db)\n return 
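multiply_two above has a few slips: the filter tests num % 2 (the target's parity) instead of the elements', and the pair search compares the product against a hard-coded 20 rather than num. If the intent is "find two values in arr whose product equals num", a one-pass version using a set of seen values is both shorter and O(n):

def find_product_pair(arr, target):
    seen = set()
    for value in arr:
        # value divides target and its complement was already seen.
        if value and target % value == 0 and target // value in seen:
            return target // value, value
        seen.add(value)
    return -1

print(find_product_pair([4, 5, 2, 10], 20))  # (4, 5)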
data_source\n","sub_path":"workstation/userwork/update_get_old_list.py","file_name":"update_get_old_list.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80997718","text":"from git import Repo\nimport time\nimport RPi.GPIO as GPIO\nimport os\nimport shutil\nimport socket\n\n# Sleep for a while to ensure the service has internet connection.\ntime.sleep(12)\n\n# Check internet connection using the builtin socket library\ntry:\n socket.setdefaulttimeout(3)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((\"8.8.8.8\", 53))\n\n# If we have no internet or see some other error, just exit to avoid destroying the current program.\nexcept socket.error as ex:\n print(ex)\n exit()\n\n# A very simple class for updating the boat remotely.\n# The idea is that we should be able to run the pi in\n# headless mode, plugged into ethernet, and have it update\n# correctly before running the actual Boatbrain.py program.\n\n# This sets the pinmode to reference the numbers in the charts,\n# not directly on the board. I do this because I'm a masochist.\nGPIO.setmode(GPIO.BCM)\n\n# Pin definitions. Each pin will correspond to a different branch in the repo.\nBranch1 = 16\nBranch2 = 13\nBranch3 = 12\n\nGPIO.setup(Branch1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(Branch2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(Branch3, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n# Path to the local Repo on the pi. we start by removing the directory so that we can\n# avoid merge conflicts and other things like that. Just doing a clean install should be fine.\nif os.path.exists('/home/pi/Desktop/PythonBoat'):\n shutil.rmtree('/home/pi/Desktop/PythonBoat')\n\n# Attempting to change directories so that clone_from() doesn't get angry at me.\nos.chdir('/home/pi/Desktop/')\n\n# These branches can be changed easily. Theoretically, we will never change\n# The master branch pin definition (no pin at all), so we will always be able\n# To push updated pin defs for new branches and update without having to actually\n# go into the pi itself. 
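The connectivity probe at the top of Updater.py (a TCP connect to 8.8.8.8:53, Google's DNS) is useful enough to pull into a helper that callers can retry instead of exiting on the first failure; a sketch:

import socket

def have_internet(host="8.8.8.8", port=53, timeout=3):
    # Same probe as above: if a TCP connection to a public DNS server
    # succeeds, we almost certainly have a working network path.
    try:
        socket.setdefaulttimeout(timeout)
        socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
        return True
    except socket.error:
        return False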
There may be a better way to do this.\n\n# Debugging\nprint(\"B1: \" + str(GPIO.input(Branch1)))\nprint(\"B2: \" + str(GPIO.input(Branch2)))\nprint(\"B3: \" + str(GPIO.input(Branch3)))\n\nif GPIO.input(Branch1) == GPIO.HIGH:\n print(\"Cloning JRG_Branch\")\n Repo.clone_from(\"git@github.com:JFreyWM/PythonBoat.git\", '/home/pi/Desktop/PythonBoat',\n branch='JRG_Branch')\n\nelif GPIO.input(Branch2) == GPIO.HIGH:\n print(\"Cloning LSM9DS_IMU\")\n Repo.clone_from(\"git@github.com:JFreyWM/PythonBoat.git\", '/home/pi/Desktop/PythonBoat',\n branch='LSM9DS_IMU')\n\nelif GPIO.input(Branch3) == GPIO.HIGH:\n print(\"Cloning Other\")\n # We don't have a third branch yet,\n # just leaving it here so we can expand later.\n pass\n\nelse:\n print(\"Cloning Master\")\n # If we have no input to the Pi, just pull master (no branch defaults to master).\n Repo.clone_from(\"git@github.com:JFreyWM/PythonBoat.git\", '/home/pi/Desktop/PythonBoat',\n branch=\"master\")\n\n# Sleep for a few seconds just to avoid any problems transitioning into the boatBrain program.\n# Remember, this script is called by start_boat.sh on startup.\ntime.sleep(5)\n","sub_path":"Updater.py","file_name":"Updater.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544890139","text":"def reward_function(params):\n '''\n Reward function iteration 1 (JP):\n changes: \n 1. combine all 3 examples (stay on center, keep all wheels on the track and mitigate zig-zag behaviors)\n 2. add another modifier that discourages stopping and excessive speed \n results: \n success rate = 100% from 3 runs\n record = 1min10sec\n Reward function iteration 2 (JP)\n changes: \n 1. modify stay on center so that it allows the racer to venture a bit closer to the edge\n 2. added more steering thresholds\n 3. added example for waypoints\n 4. 
added more speed tresholds\n '''\n\n # Read input parameters\n track_width = params['track_width']\n distance_from_center = params['distance_from_center']\n \n # Calculate 4 markers that are at varying distances away from the center line (e\n marker_1 = 0.1 * track_width\n marker_2 = 0.25 * track_width\n marker_3 = 0.5 * track_width\n marker_4 = 0.75 * track_width\n \n # Give higher reward if the car is closer to center line and vice versa\n if distance_from_center <= marker_1:\n reward = 1.0\n elif distance_from_center <= marker_2:\n reward = 0.6\n elif distance_from_center <= marker_3:\n reward = 0.3\n elif distance_from_center <= marker_4:\n reward = 0.1\n else:\n reward = 1e-3 # likely crashed/ close to off track\n\t\t\n#Taken from \"Example of rewarding the agent to stay inside the two borders of the track\" - adapted\n all_wheels_on_track = params['all_wheels_on_track']\n # Give a bonus if all the wheels are on the track and the agent is somewhere in between the track borders\n if all_wheels_on_track and (0.5*track_width - distance_from_center) >= 0.05:\n reward *= 1.5\n\t\t\n#Taken from \"Example of penalize steering, which helps mitigate zig-zag behaviors\" - adapted and modified\n steering = abs(params['steering_angle']) # Only need the absolute steering angle\n\n # Steering penality threshold, change the number based on your action space setting\n steering_threshold_1 = 30\n steering_threshold_2 = 20\n steering_threshold_3 = 10\n steering_threshold_4 = 5\n steering_threshold_5 = 1\n\n # Penalize reward if the agent is steering too much and reward if it goes straight\n if steering > steering_threshold_1:\n reward *= 0.7\n elif steering > steering_threshold_2:\n reward *= 0.8\n elif steering > steering_threshold_3:\n reward *= 0.9\n elif steering > steering_threshold_4:\n reward *= 1\n elif steering > steering_threshold_5:\n reward *= 1.1\n\n#Taken from \"Example of using waypoints and heading to make the car in the right direction\" - adapted and modified\n\n import math\n\n # Read input variables\n waypoints = params['waypoints']\n closest_waypoints = params['closest_waypoints']\n heading = params['heading']\n\n # Calculate the direction of the center line based on the closest waypoints\n next_point = waypoints[closest_waypoints[1]]\n prev_point = waypoints[closest_waypoints[0]]\n\n # Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians\n track_direction = math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0]) \n # Convert to degree\n track_direction = math.degrees(track_direction)\n\n # Calculate the difference between the track direction and the heading direction of the car\n direction_diff = abs(track_direction - heading)\n\n # Penalize the reward if the difference is too large\n DIRECTION_THRESHOLD = 10.0\n if direction_diff > DIRECTION_THRESHOLD:\n reward *= 0.5\n \n# Stuff from JP :)\n speed = params['speed']\n speed_threshold_1 = 0.1\n speed_threshold_2 = 0.5\n speed_threshold_3 = 1\n speed_threshold_4 = 2\n speed_threshold_5 = 3\n speed_threshold_6 = 4\n\t# penalize if the vehicle stops or goes backwards (if possible)?\n if speed < speed_threshold_1:\n reward *= 0.3\n elif speed < speed_threshold_2:\n reward *= 0.8\n elif speed < speed_threshold_3:\n reward *= 0.9\n elif speed > speed_threshold_4:\n reward *= 1\n elif speed > speed_threshold_5:\n reward *= 0.8\n elif speed > speed_threshold_6:\n reward *= 0.5\n else:\n #speed is >= treshold_3 but <= than threshold 4\n reward *= 1.1\n\t\t\t\n return 
float(reward)","sub_path":"Ferraracer.py","file_name":"Ferraracer.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"17234611","text":"# Taken from https://pythonprojects.io/scatter-plot-with-matplotlib\n\nimport matplotlib.pyplot as plt\n\ndef make_scatter_plot():\n    exam_scores = [50, 35, 90, 63, 85, 99, 78]\n    hours_studying = [2, 1, 7, 4, 6, 9, 5]\n\n    # label the x axis with Hours Spent Studying\n    plt.xlabel('Hours Spent Studying')\n\n    # label the y axis with Exam Scores\n    plt.ylabel('Exam Scores')\n\n    # we need to tell matplotlib what type of graph we'd like to use\n    plt.scatter(hours_studying, exam_scores, c='b')\n\n    # show our graph\n    plt.show()\n\nmake_scatter_plot()\n","sub_path":"examples/run-file3.py","file_name":"run-file3.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"78620809","text":"abc=[\"abc\",\"cde\"]\nprint(abc[1])\n# Sets \na=\"qwrrttwewt\"\nb=\"wqrqqwtqwtdaf\"\nsa=set(a)\nsb=set(b)\n# Intersection\nprint(sa&sb)\n# Union\nprint(sa|sb)\n# Dictionary key:value\nd1={\"name\":\"rr\",\"sex\":\"man\"}","sub_path":".history/python01_20191127171344.py","file_name":"python01_20191127171344.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"225960890","text":"import re\nfrom tg.widgets import *\nfrom tg.validators import *\n \nfrom bookingapp.widgets.formfields import *\n\nsources = (\n    _(u\"Google\"),\n    _(u\"Google AdSense\"),\n    _(u\"tourist-paradise.com\"),\n    _(u\"rent-holiday-homes.com\"),\n    _(u\"vacances-a-louer.fr\"),\n    _(u\"alojamiento-vacaciones\"),\n    _(u\"vacantis.com\"),\n    _(u\"other\")\n)\n\n\nclass ContactForm(TableForm):\n    class ContactFields(WidgetsList):\n        name = TextField()\n        email = TextField()\n        phone = TextField()\n        message = TextArea()\n        source = SingleSelectField(\n            label = _(u\"How did you hear about this website?\"),\n            options = (('', _(u\"please choose\")),) + tuple(\n                (s, s) for s in sources))\n    \n    class ContactSchema(Schema):\n        name = UnicodeString(not_empty=True, strip=True)\n        email = Email(not_empty=True, strip=True)\n        phone = UniversalPhoneNumber(strip=True)\n        message = UnicodeString(not_empty=True, strip=True)\n        source = UnicodeString(not_empty=True, strip=True)\n    \n    fields = ContactFields()\n    validator = ContactSchema()\n\n","sub_path":"bookingapp/widgets/contact.py","file_name":"contact.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} {"seq_id":"491557914","text":"# -*- coding:utf-8 -*-\n\"\"\"\nDjango settings for ud project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Windows Active Directory account used to access network resources\nAD_DOMAIN = \"admgor\"\nAD_USERNAME = \"delo\"\nAD_PASSWORD = \"Vologda123456\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport ldap\nfrom django_auth_ldap.config import LDAPSearch, LDAPSearchUnion\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Baseline configuration.\nAUTH_LDAP_SERVER_URI = \"ldap://192.168.1.248\"\n\n#AUTH_LDAP_START_TLS = True\n\nAUTH_LDAP_BIND_DN = r\"%s\\%s\" % (AD_DOMAIN, 
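Stepping back to the Ferraracer reward function that ends above: two pitfalls are worth flagging. First, abs(track_direction - heading) can report almost 360 degrees for two nearly aligned headings (say -179 vs +179), so the difference should be folded into [0, 180]. Second, the speed chain tests speed > speed_threshold_4 before the higher thresholds, so the > threshold_5 and > threshold_6 penalty branches can never fire. A corrected sketch of both pieces, keeping the same thresholds and multipliers but reordered so every branch is reachable:

def wrap_direction_diff(track_direction, heading):
    # Fold the raw difference into [0, 180] degrees.
    diff = abs(track_direction - heading) % 360.0
    return 360.0 - diff if diff > 180.0 else diff

def shape_speed_reward(reward, speed):
    if speed > 4.0:      # speed_threshold_6: excessive speed
        return reward * 0.5
    if speed > 3.0:      # speed_threshold_5
        return reward * 0.8
    if speed > 2.0:      # speed_threshold_4
        return reward * 1.0
    if speed < 0.1:      # speed_threshold_1: near-standstill
        return reward * 0.3
    if speed < 0.5:      # speed_threshold_2
        return reward * 0.8
    if speed < 1.0:      # speed_threshold_3
        return reward * 0.9
    return reward * 1.1  # 1.0 <= speed <= 2.0, the intended sweet spot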
AD_USERNAME)\nAUTH_LDAP_BIND_PASSWORD = AD_PASSWORD\n\nAUTH_LDAP_USER_SEARCH = LDAPSearchUnion(\n LDAPSearch(u\"ou=Пользователи,dc=%s,dc=local\" % AD_DOMAIN,\n ldap.SCOPE_SUBTREE, u\"(sAMAccountName=%(user)s)\"),\n LDAPSearch(u\"cn=users,dc=%s,dc=local\" % AD_DOMAIN,\n ldap.SCOPE_SUBTREE, u\"(sAMAccountName=%(user)s)\"),\n LDAPSearch(u\"ou=Управляемые пользователи,dc=%s,dc=local\" % AD_DOMAIN,\n ldap.SCOPE_SUBTREE, u\"(sAMAccountName=%(user)s)\"),\n LDAPSearch(u\"ou=Администраторы,dc=%s,dc=local\" % AD_DOMAIN,\n ldap.SCOPE_SUBTREE, u\"(sAMAccountName=%(user)s)\")\n)\n\n# or perhaps:\n#AUTH_LDAP_USER_DN_TEMPLATE = AD_DOMAIN + r\"\\%(user)s\"\n\n# Populate the Django user from the LDAP directory.\nAUTH_LDAP_USER_ATTR_MAP = {\n \"first_name\": \"givenName\",\n \"last_name\": \"sn\",\n \"email\": \"mail\"\n}\n\nAUTH_LDAP_PROFILE_ATTR_MAP = {\n \"employee_number\": \"employeeNumber\"\n}\n\n#AUTH_LDAP_USER_FLAGS_BY_GROUP = {\n #\"is_active\": True\n#}\n\n# This is the default, but I like to be explicit.\nAUTH_LDAP_ALWAYS_UPDATE_USER = True\n\n# Use LDAP group membership to calculate group permissions.\nAUTH_LDAP_FIND_GROUP_PERMS = False\n\n# Cache group memberships for an hour to minimize LDAP traffic\nAUTH_LDAP_CACHE_GROUPS = True\nAUTH_LDAP_GROUP_CACHE_TIMEOUT = 3600\n\n# Keep ModelBackend around for per-user permissions and maybe a local\n# superuser.\nAUTHENTICATION_BACKENDS = (\n 'django_auth_ldap.backend.LDAPBackend', # У нас используется ��емножко поправленный вариант — 'geoportal.auth.ActiveDirectoryBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nRAVEN_CONFIG = {\n 'dsn': 'http://a6c080617fec4256b639621b5ac9e0c3:b280f02a71064207a2aeccff159a548f@192.168.1.241:9000/4',\n}\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'b+5mobzk8_#ue@$mo6g1x!xw_^pu5+%gu3+*3s+o!qrvl+yh4#'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nINTERNAL_IPS = [\n '10.16.7.216', # Белов А.А.\n '192.168.0.57', # Яковлев А.С.\n]\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'raven.contrib.django.raven_compat',\n # Other\n 'compressor',\n # Own\n 'deloreports',\n\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'ud.urls'\n\nWSGI_APPLICATION = 'ud.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'ud',\n 'USER': 'ud',\n 'PASSWORD': 'ud3214',\n 'HOST': '10.16.1.41',\n 'PORT': '5432',\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'ru-RU'\n\nTIME_ZONE = 'Europe/Moscow'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# 
https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static/')\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # other finders..\n 'compressor.finders.CompressorFinder',\n)\n\n\ntry:\n from .local_settings import *\nexcept ImportError:\n pass\n\n","sub_path":"ud/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"609570096","text":"import re\nimport argparse\nfrom datetime import timedelta, datetime\n\nimport whois\nimport requests\n\n\ndef parse() -> str:\n parser = argparse.ArgumentParser(\n description='''Simple site monitoring script.\n Check http status code (200 is OK) and expiration date\n (no less than 30 days)''')\n parser.add_argument('target',\n type=str,\n help=\"path to the url list (txt)\")\n args = parser.parse_args()\n return args.target\n\n\ndef adapt_urls(urls):\n http = re.compile(r\"^(http|https)://\")\n transform = lambda url: \"http://{}\".format(url)\n return [url if http.match(url) else transform(url) for url in urls]\n\n\ndef load_urls4check(path):\n with open(path, 'r') as opened_file:\n return [url.strip() for url in opened_file]\n\n\ndef is_server_respond_with_200(url):\n try:\n return requests.get(url).status_code == requests.codes.ok\n except requests.exceptions.ConnectionError:\n return False\n\n\ndef get_domain_expiration_date(url):\n try:\n domain = whois.whois(url)\n date_delta = timedelta(days=30)\n domain_date = domain.expiration_date\n return (domain_date - date_delta) > datetime.now()\n except TypeError:\n return False\n\n\ndef main():\n url_list = parse()\n raw_urls = load_urls4check(url_list)\n urls = adapt_urls(raw_urls)\n for url in urls:\n status_code = is_server_respond_with_200(url)\n expr_date = get_domain_expiration_date(url)\n print(\"Domain: {}\".format(url))\n print(\"HTTP (Code 200): {}\".format(status_code))\n print(\"Expiration date (at least 30 days): {}\".format(expr_date))\n print('==========================================================')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"check_sites_health.py","file_name":"check_sites_health.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"602911039","text":"# -*- coding:utf-8 -*-\n\n\nimport tensorflow as tf\nfrom numpy.random import RandomState\n\nw1 = tf.Variable(tf.random_normal([2,3], stddev=1, seed=1))\nw2 = tf.Variable(tf.random_normal([3,1], stddev=1, seed=1))\n\n# x = tf.constant([[0.7, 0.9]])\nx = tf.placeholder(tf.float32, shape=(None,2,), name=\"input\")\ny_ = tf.placeholder(tf.float32, shape=(None,1,), name=\"y_result\")\n\na = tf.matmul(x, w1)\ny = tf.matmul(a, w2)\n\n# Define the loss function and the backpropagation algorithm\ncross_entropy = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))\ntrain_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)\n\n# Generate training data\nrdm = RandomState(1)\ndataset_size = 128\nX = rdm.rand(dataset_size, 2)\nY = [[int(x1+x2 < 1)] for (x1,x2) in X]\n\nbatchSize = 8\nwith tf.Session() as sess:\n initOps = tf.global_variables_initializer()\n sess.run(initOps)\n print( sess.run(w1))\n print( sess.run(w2))\n\n for i in range(0, 5000):\n start = (i * batchSize) % dataset_size\n end = min(start + batchSize, dataset_size)\n sess.run(train_step, feed_dict={x: 
X[start:end], y_:Y[start:end]})\n\n if i % 100 == 0:\n totalCrossEntropy = sess.run(cross_entropy, feed_dict={x: X, y_: Y})\n print(\"i=%s, crossEntropy=%s\" % (i, totalCrossEntropy))\n\n print(sess.run(w1))\n print(sess.run(w2))\n","sub_path":"tensorflow/tsflow/demo04-final.py","file_name":"demo04-final.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"643100769","text":"# Some utility functions\r\nimport os\r\nimport re\r\nimport time\r\nimport datetime\r\n\r\n\r\n# Save requests.content/text\r\ndef SaveHtml(content, savefile, filename):\r\n\tif not os.path.exists(savefile):\r\n\t\tos.mkdir(savefile)\r\n\tf = open(os.path.join(savefile, filename), 'w')\r\n\tf.write(str(content))\r\n\tf.close()\r\n\r\n\r\n# Save the cookie\r\ndef SaveCookie(cookie, savefile='./data'):\r\n\tif not os.path.exists(savefile):\r\n\t\tprint('[Warning]: %s inexistence, create new one...' % savefile)\r\n\t\tos.mkdir(savefile)\r\n\tf = open(os.path.join(savefile, 'cookie.info'), 'w')\r\n\tf.write(str(cookie))\r\n\tf.close()\r\n\r\n\r\n# Read the cookie\r\ndef ReadCookie(datafile='./data'):\r\n\tif not os.path.exists(datafile):\r\n\t\tprint('[Warning]: %s inexistence in ...' % datafile)\r\n\t\treturn None\r\n\ttxtpath = os.path.join(datafile, 'cookie.info')\r\n\tif not os.path.isfile(txtpath):\r\n\t\tprint('[Warning]: %s inexistence in ...' % txtpath)\r\n\t\treturn None\r\n\tf = open(txtpath, 'r')\r\n\tcookie = f.read().strip()\r\n\treturn cookie if cookie else None\r\n\r\n\r\n# Get request headers\r\ndef GetHeader(cookie=None):\r\n\tif cookie:\r\n\t\theaders = {\r\n\t\t\t\"accept-language\": \"zh-CN,zh;q=0.9\",\r\n\t\t\t\"accept-encoding\": \"gzip, deflate, sdch, br\",\r\n\t\t\t\"accept\": \"*/*\",\r\n\t\t\t\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\",\r\n\t\t\t\"cookie\": cookie\r\n\t\t}\r\n\telse:\r\n\t\theaders = {\r\n\t\t\t\"accept-language\": \"zh-CN,zh;q=0.9\",\r\n\t\t\t\"accept-encoding\": \"gzip, deflate, sdch, br\",\r\n\t\t\t\"accept\": \"*/*\",\r\n\t\t\t\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36\"\r\n\t\t}\r\n\treturn headers\r\n\r\n\r\n# Get gtk\r\ndef GetGtk(skey):\r\n\tthash = 5381\r\n\tfor c in skey:\r\n\t\tthash += (thash<<5) + ord(c)\r\n\treturn thash&2147483647\r\n\r\n\r\n# Get skey\r\ndef GetSkey(cookie):\r\n\titem = re.findall(r'p_skey=(.*?);', cookie)\r\n\treturn item[0] if len(item) > 0 else None\r\n\r\n\r\n# Read QQ numbers of friends I follow and friends who follow me\r\ndef ReadCared(datafile):\r\n\tfriends = []\r\n\tf = open(datafile)\r\n\tfor line in f:\r\n\t\tline = line.strip()\r\n\t\tif line:\r\n\t\t\ttry:\r\n\t\t\t\tint(line[0])\r\n\t\t\texcept:\r\n\t\t\t\tcontinue\r\n\t\t\tfriend = line.split(' ')\r\n\t\t\tfor fri in friend:\r\n\t\t\t\tif fri not in friends:\r\n\t\t\t\t\tfriends.append(fri)\r\n\treturn friends\r\n\r\n\r\n# Draw a bar chart\r\ndef DrawBar(data, mark_point=[\"min\", \"max\"], barname=None):\r\n\tfrom pyecharts import Bar\r\n\tif barname is not None:\r\n\t\tbar = Bar(barname)\r\n\telse:\r\n\t\tbar = Bar()\r\n\ttry:\r\n\t\tbar.add('', data[0], data[1], mark_point=mark_point)\r\n\texcept:\r\n\t\tprint('[Error]: Arguments format error in ...')\r\n\t\treturn None\r\n\tif barname is None:\r\n\t\tbarname = 'results'\r\n\tbar.render('%s.html' % barname)\r\n\r\n\r\n# Draw a pie chart\r\ndef DrawPie(data, piename=None):\r\n\tfrom pyecharts import Pie\r\n\tif piename is not None:\r\n\t\tpie = 
Pie(piename)\r\n\telse:\r\n\t\tpie = Pie()\r\n\ttry:\r\n\t\tpie.add('', data[0], data[1], is_label_show=True)\r\n\texcept:\r\n\t\tprint('[Error]: Arguments format error in ...')\r\n\t\treturn None\r\n\tif piename is None:\r\n\t\tpiename = 'results'\r\n\tpie.render('%s.html' % piename)\r\n\r\n\r\n# Draw a map\r\ndef DrawMap(data, mapname=''):\r\n\tfrom pyecharts import Map\r\n\tmap_ = Map(mapname, width=1200, height=600)\r\n\ttry:\r\n\t\tmap_.add('', data[0], data[1], maptype='china', is_visualmap=True, visual_text_color='#000')\r\n\texcept:\r\n\t\tprint('[Error]: Arguments format error in ...')\r\n\t\treturn None\r\n\tif mapname is '':\r\n\t\tmapname = 'results'\r\n\tmap_.render('%s.html' % mapname)\r\n\r\n\r\n# Gender statistics\r\ndef CountSex(friendsInfoDict):\r\n\tboy = 0\r\n\tgirl = 0\r\n\tother = 0\r\n\tfor key in friendsInfoDict:\r\n\t\ttry:\r\n\t\t\tif friendsInfoDict[key]['sex'] == '2':\r\n\t\t\t\tgirl += 1\r\n\t\t\telif friendsInfoDict[key]['sex'] == '1':\r\n\t\t\t\tboy += 1\r\n\t\t\telse:\r\n\t\t\t\tother += 1\r\n\t\texcept:\r\n\t\t\tother += 1\r\n\treturn boy, girl, other\r\n\r\n\r\n# Age statistics\r\ndef CountAge(friendsInfoDict):\r\n\tageDict = {}\r\n\tfor key in friendsInfoDict:\r\n\t\ttry:\r\n\t\t\tbirthyear = friendsInfoDict[key]['birthyear']\r\n\t\texcept:\r\n\t\t\tbirthyear = ''\r\n\t\tif birthyear == '' or birthyear == '0':\r\n\t\t\tageDict['other'] = ageDict.get('other', 0) + 1\r\n\t\telse:\r\n\t\t\tageDict[birthyear] = ageDict.get(birthyear, 0) + 1\r\n\titems = sorted(ageDict.items(), key=lambda x: x[0], reverse=True)\r\n\tcounts = []\r\n\tages = []\r\n\tfor item in items:\r\n\t\tbirthyear = item[0]\r\n\t\tcount = item[1]\r\n\t\tif birthyear != 'other':\r\n\t\t\tage = datetime.datetime.now().year - int(birthyear)\r\n\t\t\tages.append(age)\r\n\t\t\tcounts.append(count)\r\n\t\telse:\r\n\t\t\tages.append('Unknown')\r\n\t\t\tcounts.append(count)\r\n\treturn [ages, counts]\r\n\r\n\r\n# Region statistics\r\ndef CountArea(friendsInfoDict, areatype):\r\n\tareaDict = {}\r\n\tfor key in friendsInfoDict:\r\n\t\ttry:\r\n\t\t\tarea = friendsInfoDict[key][areatype]\r\n\t\texcept:\r\n\t\t\tarea = ''\r\n\t\tif area == '':\r\n\t\t\tarea = 'Unknown'\r\n\t\tareaDict[area] = areaDict.get(area, 0) + 1\r\n\treturn [list(areaDict.keys()), list(areaDict.values())]\r\n\r\n\r\n# Parse friend info\r\ndef ParseFriendsInfo(qq, t_qq, datafile='./results'):\r\n\tinfo_txt = os.path.join(datafile, qq, t_qq+'_info.txt')\r\n\tif not os.path.exists(info_txt):\r\n\t\treturn None\r\n\tinfoDict = {}\r\n\twith open(info_txt) as f:\r\n\t\tfor line in f:\r\n\t\t\tline=line.strip()\r\n\t\t\t# Nickname\r\n\t\t\tif line.startswith('\"nickname\":'):\r\n\t\t\t\tnickname = re.findall(r'\"nickname\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['nickname'] = nickname\r\n\t\t\t\tcontinue\r\n\t\t\t# Qzone name\r\n\t\t\telif line.startswith('\"spacename\":'):\r\n\t\t\t\tspacename = re.findall(r'\"spacename\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['spacename'] = spacename\r\n\t\t\t\tcontinue\r\n\t\t\t# Qzone description\r\n\t\t\telif line.startswith('\"desc\":'):\r\n\t\t\t\tdesc = re.findall(r'\"desc\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['desc'] = desc\r\n\t\t\t\tcontinue\r\n\t\t\t# Qzone signature\r\n\t\t\telif line.startswith('\"signature\":'):\r\n\t\t\t\tsignature = re.findall(r'\"signature\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['signature'] = signature\r\n\t\t\t\tcontinue\r\n\t\t\t# Gender\r\n\t\t\telif line.startswith('\"sex\":'):\r\n\t\t\t\tsex = re.findall(r'\"sex\":(.*?),', line)[0]\r\n\t\t\t\tinfoDict['sex'] = sex\r\n\t\t\t\tcontinue\r\n\t\t\t# Birth year\r\n\t\t\telif 
line.startswith('\"birthyear\":'):\r\n\t\t\t\tbirthyear = re.findall(r'\"birthyear\":(.*?),', line)[0]\r\n\t\t\t\tinfoDict['birthyear'] = birthyear\r\n\t\t\t\tcontinue\r\n\t\t\t# 出生月日\r\n\t\t\telif line.startswith('\"birthday\":'):\r\n\t\t\t\tbirthday = re.findall(r'\"birthday\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['birthday'] = birthday\r\n\t\t\t\tcontinue\r\n\t\t\t# 血型\r\n\t\t\telif line.startswith('\"bloodtype\":'):\r\n\t\t\t\tbloodtype = re.findall(r'\"bloodtype\":(.*?),', line)[0]\r\n\t\t\t\tinfoDict['bloodtype'] = bloodtype\r\n\t\t\t\tcontinue\r\n\t\t\t# 星座\r\n\t\t\telif line.startswith('\"constellation\":'):\r\n\t\t\t\tconstellation = re.findall(r'\"constellation\":(.*?),', line)[0]\r\n\t\t\t\tinfoDict['constellation'] = constellation\r\n\t\t\t\tcontinue\r\n\t\t\t# 国家\r\n\t\t\telif line.startswith('\"country\":'):\r\n\t\t\t\tcountry = re.findall(r'\"country\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['country'] = country\r\n\t\t\t\tcontinue\r\n\t\t\t# 省\r\n\t\t\telif line.startswith('\"province\":'):\r\n\t\t\t\tprovince=re.findall(r'\"province\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['province'] = province\r\n\t\t\t\tcontinue\r\n\t\t\t# 城市\r\n\t\t\telif line.startswith('\"city\":'):\r\n\t\t\t\tcity=re.findall(r'\"city\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['city'] = city\r\n\t\t\t\tcontinue\r\n\t\t\t# 家乡国\r\n\t\t\telif line.startswith('\"hco\":'):\r\n\t\t\t\thco = re.findall(r'\"hco\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['hco'] = hco\r\n\t\t\t\tcontinue\r\n\t\t\t# 家乡省\r\n\t\t\telif line.startswith('\"hp\":'):\r\n\t\t\t\thp=re.findall(r'\"hp\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['hp'] = hp\r\n\t\t\t\tcontinue\r\n\t\t\t# 家乡城\r\n\t\t\telif line.startswith('\"hc\":'):\r\n\t\t\t\thc = re.findall(r'\"hc\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['hc'] = hc\r\n\t\t\t\tcontinue\r\n\t\t\t# 婚否\r\n\t\t\telif line.startswith('\"marriage\":'):\r\n\t\t\t\tmarriage=re.findall(r'\"marriage\":(.*?),', line)[0]\r\n\t\t\t\tinfoDict['marriage'] = marriage\r\n\t\t\t\tcontinue\r\n\t\t\t# 职业\r\n\t\t\telif line.startswith('\"career\":'):\r\n\t\t\t\tcareer = re.findall(r'\"career\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['career'] = career\r\n\t\t\t\tcontinue\r\n\t\t\t# 公司\r\n\t\t\telif line.startswith('\"company\":'):\r\n\t\t\t\tcompany = re.findall(r'\"company\":\"(.*?)\",', line)[0]\r\n\t\t\t\tinfoDict['company'] = company\r\n\t\t\t\tcontinue\r\n\t\t\t# 最后修改时间\r\n\t\t\telif line.startswith('\"ptimestamp\":'):\r\n\t\t\t\tptimestamp = re.findall(r'\"ptimestamp\":(.*?)}', line)[0]\r\n\t\t\t\tif ptimestamp != '':\r\n\t\t\t\t\ttemp = time.localtime(float(ptimestamp))\r\n\t\t\t\t\tcreatetime = time.strftime('%Y-%m-%d %H:%M:%S', temp)\r\n\t\t\t\telse:\r\n\t\t\t\t\tcreatetime = ''\r\n\t\t\t\tinfoDict['createtime'] = createtime\r\n\t\t\t\tcontinue\r\n\treturn infoDict","sub_path":"spider-project/qq下/QQProject/QQProject/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"405827671","text":"annual_salary = int(input(\"Enter your annual salary:\"))\r\nportion_saved = float(input(\"Enter the percent of your salary to save, as a decimal:\"))\r\ntotal_cost = int(input(\"Enter the cost of your dream home:\"))\r\n\r\nmonthly_salary = annual_salary / 12\r\n# Assume that our down payment is 25%\r\nportion_down_payment = total_cost * 0.25\r\n# Assume that our investments earn a return of 4% a year\r\nr = 0.04\r\ncurrent_savings = 0\r\nmonths_count = 0\r\n\r\nwhile 
current_savings < portion_down_payment:\r\n months_count += 1\r\n\r\n current_savings += monthly_salary * portion_saved + current_savings * r / 12\r\n\r\n\r\nprint(\"Number of months:\", months_count)","sub_path":"ps1a.py","file_name":"ps1a.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"579717839","text":"import os\nfrom envParser import EnvParser\n\nparser = EnvParser()\nparser.parse()\n\n# where .torrent files should be copied, default value: /home/$USER/rtorrent/watch/start/\nWATCH_DIR = parser.get('WATCH_DIR')\n\n# the maximum size allowed to download. set to -1 to ignore default: 50GiB\nMAX_SIZE = int(parser.get('MAX_SIZE'))\n\n# if the program should run in a loop\nAUTOMATIC_UPDATE = int(parser.get('AUTOMATIC_UPDATE')) == 1\n\n# after how many minutes the program should rerun; default: 30\nUPDATE_AFTER_MINUTES = float(parser.get('UPDATE_AFTER_MINUTES'))\n\n# url of rss feed\nRSS_FEED = parser.get('RSS_FEED')\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"130143472","text":"\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved\n\nAuthor: Dejiao Zhang (dejiaoz@amazon.com)\nDate: 02/26/2021\n\"\"\"\n\nimport sys\nsys.path.append( './' )\n\nimport torch\nimport argparse\nfrom sentence_transformers import SentenceTransformer\nfrom models.Transformers import SCCLBert\nfrom learners.cluster import ClusterLearner\nfrom dataloader.dataloader import augment_loader\nfrom training import training\nfrom utils.kmeans import get_kmeans_centers\nfrom utils.logger import setup_path\nfrom utils.randomness import set_global_random_seed\n\nMODEL_CLASS = {\n \"distil\": 'distilbert-base-nli-stsb-mean-tokens', \n \"robertabase\": 'roberta-base-nli-stsb-mean-tokens',\n \"robertalarge\": 'roberta-large-nli-stsb-mean-tokens',\n \"msmarco\": 'distilroberta-base-msmarco-v2',\n \"xlm\": \"xlm-r-distilroberta-base-paraphrase-v1\",\n \"bertlarge\": 'bert-large-nli-stsb-mean-tokens',\n \"bertbase\": 'bert-base-nli-stsb-mean-tokens',\n}\n\ndef run(args):\n resPath, tensorboard = setup_path(args)\n args.resPath, args.tensorboard = resPath, tensorboard\n set_global_random_seed(args.seed)\n\n # dataset loader\n train_loader = augment_loader(args)\n\n # model\n torch.cuda.set_device(args.gpuid[0])\n sbert = SentenceTransformer(MODEL_CLASS[args.bert])\n cluster_centers = get_kmeans_centers(sbert, train_loader, args.num_classes) \n model = SCCLBert(sbert, cluster_centers=cluster_centers, alpha=args.alpha) \n model = model.cuda()\n\n # optimizer \n optimizer = torch.optim.Adam([\n {'params':model.sentbert.parameters()}, \n {'params':model.head.parameters(), 'lr': args.lr*args.lr_scale},\n {'params':model.cluster_centers, 'lr': args.lr*args.lr_scale}], lr=args.lr)\n print(optimizer)\n \n # set up the trainer \n learner = ClusterLearner(model, optimizer, args.temperature, args.base_temperature)\n training(train_loader, learner, args)\n return None\n\ndef get_args(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpuid', nargs=\"+\", type=int, default=[0], help=\"The list of gpuid, ex:--gpuid 3 1. 
Negative value means cpu-only\")\n parser.add_argument('--seed', type=int, default=0, help=\"\")\n parser.add_argument('--print_freq', type=float, default=100, help=\"\") \n parser.add_argument('--result_path', type=str, default='./results/')\n parser.add_argument('--bert', type=str, default='distil', help=\"\")\n # Dataset\n parser.add_argument('--dataset', type=str, default='searchsnippets', help=\"\")\n parser.add_argument('--data_path', type=str, default='../datasets/')\n parser.add_argument('--dataname', type=str, default='searchsnippets.csv', help=\"\")\n parser.add_argument('--num_classes', type=int, default=8, help=\"\")\n parser.add_argument('--max_length', type=int, default=32)\n # Learning parameters\n parser.add_argument('--lr', type=float, default=1e-5, help=\"\")\n parser.add_argument('--lr_scale', type=int, default=100, help=\"\")\n parser.add_argument('--max_iter', type=int, default=3000)\n # contrastive learning\n parser.add_argument('--batch_size', type=int, default=400)\n parser.add_argument('--temperature', type=float, default=0.5, help=\"temperature required by contrastive loss\")\n parser.add_argument('--base_temperature', type=float, default=0.1, help=\"temperature required by contrastive loss\")\n # Clustering\n parser.add_argument('--use_perturbation', action='store_true', help=\"\")\n parser.add_argument('--alpha', type=float, default=1.0)\n \n args = parser.parse_args(argv)\n args.use_gpu = args.gpuid[0] >= 0\n args.resPath = None\n args.tensorboard = None\n\n return args\n\nif __name__ == '__main__':\n run(get_args(sys.argv[1:]))\n\n\n\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"420648643","text":"\"\"\"Defines the central class of Pylambder\"\"\"\n\nimport inspect\nimport logging\nfrom pathlib import Path\nimport os\n\nimport boto3\n\nfrom pylambder import config\nfrom pylambder.aws_task import CloudFunction\nfrom pylambder.websocket import WebsocketHandler\n\nlogger = logging.getLogger(__name__)\n\n\nclass Pylambder:\n \"\"\"Like Celery\"\"\"\n\n # tasks: Dict[aws_task.TaskId, aws_task.AWSTask]\n # websocket_handler: WebsocketHandler\n\n def __init__(self):\n if not self._is_lambda():\n config.ensure_loaded()\n self.api_url = self._obtain_api_url()\n self.tasks = dict()\n self.websocket_handler = WebsocketHandler(self)\n self.websocket_handler.run()\n\n def task(self, function):\n \"\"\"Function decorator turning it into CloudFunction. 
Named 'task'\n because of Celery\"\"\"\n module = _getmodule(function)\n function_name = function.__name__\n return CloudFunction(function, module, function_name, self)\n\n @staticmethod\n def _obtain_api_url():\n cloudformation = boto3.resource('cloudformation')\n stackname = config.CLOUDFORMATION_STACK\n stack = cloudformation.Stack(stackname)\n return [x for x in stack.outputs if x['OutputKey'] == 'WebSocketURI'][0]['OutputValue']\n\n @staticmethod\n def _is_lambda():\n return 'LAMBDA_TASK_ROOT' in os.environ\n\n\ndef _getmodule(func) -> str:\n \"\"\"Extract module name of a function.\n Root directory for the module name must be consistent with\n the root directory of the project uploaded to AWS\"\"\"\n module_path = Path(inspect.getmodule(func).__file__)\n module_name = module_path.with_suffix('').name\n path = module_path.parent\n\n # traverse directories until the project root is reached,\n # that is, one that contains requirements.txt or pylambder_config.py\n while not ((path / 'requirements.txt').is_file() or\n (path / 'pylambder_config.py').is_file()) and \\\n path not in [Path('.'), Path('/')]:\n module_name = path.name + '.' + module_name\n path = path.parent\n return module_name\n","sub_path":"pylambder/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"188973459","text":"\"\"\"\nThis module contains the information for an abstract DataViewTab.\n\"\"\"\nfrom operator import itemgetter\nfrom datetime import date, datetime\n\nfrom PyQt5.QtWidgets import (QWidget, QTableView, QVBoxLayout, QHBoxLayout, QApplication, QFileDialog)\nfrom PyQt5.QtCore import QAbstractTableModel, QVariant, Qt, QSortFilterProxyModel\nfrom PyQt5.QtGui import QColor, QBrush\n\nfrom other.utils import convertValue\n\nclass StationToolTableModel(QAbstractTableModel):\n \"\"\"\n Class inheriting QAbstractTableModel for handling table operations for the DataViewTab objects table_view object.\n \"\"\"\n def __init__(self, parent, header, data, editable = False):\n QAbstractTableModel.__init__(self, parent)\n self.header_data = [h[0] for h in header]\n self.header_types = [h[1] for h in header]\n self.array_data = data\n self.editable = editable\n\n def removeSelectedFromTable(self, index):\n \"\"\"\n Remove selected row from table\n \"\"\"\n if 0 <= index < len(self.array_data):\n del self.array_data[index]\n self.layoutChanged.emit()\n\n def rowCount(self, parent):\n \"\"\"\n Overridden function for calculating the number of rows\n \"\"\"\n return len(self.array_data)\n\n def columnCount(self, parent):\n \"\"\"\n Overridden function for calculating the number of columns\n \"\"\"\n return len(self.header_data)\n\n def flags(self, index):\n \"\"\"\n Overridden flags function for enabling data editing\n \"\"\"\n if self.editable:\n return Qt.ItemIsEnabled | Qt.ItemIsEditable\n else:\n return Qt.ItemIsEnabled\n\n def setData(self, index, value, role):\n \"\"\"\n Overridden function for setting data\n \"\"\"\n if self.editable and index.isValid():\n data_value = convertValue(value, self.header_types[index.column()])\n if data_value is not None:\n try:\n self.array_data[index.row()][index.column()] = data_value\n except:\n return False\n self.parent().resizeAll()\n self.layoutChanged.emit()\n return True\n return False\n\n def data(self, index, role):\n \"\"\"\n Overridden function for getting data from a certain index\n \"\"\"\n if index.isValid():\n if role == Qt.BackgroundRole:\n if self.array_data[index.row()][0] 
not in self.getSelectedIds() and len(self.getSelectedIds()) != 0:\n return QVariant(QBrush(QColor('lightGray')))\n else:\n return QVariant(QBrush(QColor('white')))\n if role == Qt.DisplayRole:\n if isinstance(self.array_data[index.row()][index.column()], date):\n return QVariant(self.array_data[index.row()][index.column()].strftime(\"%d/%m/%Y\"))\n return QVariant(self.array_data[index.row()][index.column()])\n return QVariant()\n\n def headerData(self, col, orientation, role):\n \"\"\"\n Overridden function for getting headerData from a certain column\n \"\"\"\n if orientation == Qt.Horizontal and role == Qt.DisplayRole:\n return QVariant(self.header_data[col])\n return QAbstractTableModel.headerData(self, col, orientation, role)\n\n def getSelectedIds(self):\n \"\"\"\n Function for getting the selected ids for sorting. Override this!\n \"\"\"\n raise Exception(\"Do not use this function but override it in the child objects!\")\n\n def updateTab(self):\n \"\"\"\n Function for updating this tab\n \"\"\"\n self.layoutAboutToBeChanged.emit()\n self.layoutChanged.emit()\n\n def sort(self, col, order = Qt.AscendingOrder):\n \"\"\"\n Function for sorting a column.\n \"\"\"\n self.layoutAboutToBeChanged.emit()\n\n chosen = []\n not_chosen = []\n chosen_ids = self.getSelectedIds()\n\n for data in self.array_data:\n if data[0] in chosen_ids:\n chosen.append(data)\n else:\n not_chosen.append(data)\n\n if self.header_types[col] is str:\n chosen.sort(key=lambda x: x[col] or '')\n not_chosen.sort(key=lambda x: x[col] or '')\n elif self.header_types[col] is float:\n chosen.sort(key=lambda x: x[col] or -99999999999.999)\n not_chosen.sort(key=lambda x: x[col] or -99999999999.999)\n elif self.header_types[col] is int:\n chosen.sort(key=lambda x: x[col] or -99999999999)\n not_chosen.sort(key=lambda x: x[col] or -99999999999)\n elif self.header_types[col] is date:\n chosen.sort(key=lambda x: x[col] or date(1900,1,1))\n not_chosen.sort(key=lambda x: x[col] or date(1900,1,1))\n elif self.header_types[col] is datetime:\n chosen.sort(key=lambda x: x[col] or datetime(1900,1,1))\n not_chosen.sort(key=lambda x: x[col] or datetime(1900,1,1))\n\n if order != Qt.AscendingOrder:\n chosen.reverse()\n not_chosen.reverse()\n\n self.array_data = chosen + not_chosen\n\n self.layoutChanged.emit()\n\n def insertNewDataRow(self, data):\n \"\"\"\n Function for inserting a new row to the table.\n \"\"\"\n if len(data) != len(self.header_data):\n raise Exception(\"Data array not of right length: expected {0} received {1}\".format(len(self.header_data), len(data)))\n\n for i in range(0, len(data)):\n if not isinstance(data[i], self.header_types[i]) and data[i] is not None:\n raise Exception(\"Data not of right type: expected {0} received {1}\".format(self.header_types[i], type(data[i])))\n\n self.array_data.append(data)\n self.layoutChanged.emit()\n self.parent().resizeAll()\n return True\n\n def clearModelData(self):\n \"\"\"\n Function for clearing all Data from model\n \"\"\"\n self.array_data = []\n self.layoutChanged.emit()\n\nclass AbstractDatabaseTableModel(StationToolTableModel):\n \"\"\"\n Class for handling database table models. All data in this model will be fetched from the database. This data can be modified in some ways. 
This class needs to be extended because of how the different database models function in the database.\n \"\"\"\n def __init__(self, parent, header, data, database_api):\n StationToolTableModel.__init__(self, parent, header, data, False)\n self.database_api = database_api\n self.fetchDataFromDB()\n\n def fetchDataFromDB(self):\n \"\"\"\n Function for fetching all relevant data from the database and giving them to the DatabaseTableModel. Redefine this to first clear the existing table and then fetch all data from the database_api.\n \"\"\"\n raise Exception(\"Do not use AbstractDatabaseTableModel but inherit it to your own class\")\n\nclass AbstractStorageTableModel(StationToolTableModel):\n \"\"\"\n Class for handling temporary table models. All data is first imported to a temporary storage table model. It will then be either abandoned or pushed to the database and then destroyed.\n \"\"\"\n def __init__(self, parent, header, data, database_model):\n StationToolTableModel.__init__(self, parent, header, data, True)\n self.database_model = database_model\n\n def getSelectedIds(self):\n \"\"\"\n Overridden function for getSelectedIds\n \"\"\"\n return []\n\n def pushDataToDatabase(self):\n \"\"\"\n Function for pushing data from the storage table to the actual database.\n \"\"\"\n raise Exception(\"pushDataToDatabase has not been redefined! Inherit AbstractStorageTableModel to your own class and redefine pushDataToDatabase for this to work\")\n\nclass DataViewTab(QWidget):\n \"\"\"\n DataViewTab for handling the table data.\n \"\"\"\n def __init__(self, parent, buttons = None):\n super(QWidget, self).__init__(parent)\n self.database_view = QTableView()\n self.storage_view = QTableView()\n\n self.database_view.setSortingEnabled(True)\n self.database_view.resizeColumnsToContents()\n\n self.storage_view.setSortingEnabled(True)\n self.storage_view.resizeColumnsToContents()\n self.storage_view.setFixedHeight(200)\n\n self.layout = QVBoxLayout(self)\n self.layout.addWidget(self.database_view)\n self.layout.addWidget(self.storage_view)\n\n if buttons is not None:\n self.layout.addWidget(buttons)\n\n self.setLayout(self.layout)\n\n self.selected_id = -1\n self.selected_storage_id = -1\n self.database_view.clicked.connect(self.saveDataRowID)\n self.storage_view.clicked.connect(self.saveStorageRowID)\n\n def resizeAll(self):\n \"\"\"\n Function for resizing all necessary windows\n \"\"\"\n self.database_view.resizeColumnsToContents()\n self.storage_view.resizeColumnsToContents()\n\n def addModels(self, database_model, storage_model):\n \"\"\"\n Add models to DataViewTab after initialisation\n \"\"\"\n self.database_view.setModel(database_model)\n self.storage_view.setModel(storage_model)\n self.resizeAll()\n\n def updateDatabaseModel(self):\n \"\"\"\n Function for updating the databaseModel to correspond to the database.\n \"\"\"\n self.database_view.model().fetchDataFromDB()\n self.database_view.resizeColumnsToContents()\n\n def addRowToStorage(self, data):\n \"\"\"\n Function for adding a new row to the Storage Model.\n \"\"\"\n self.storage_view.model().insertNewDataRow(data)\n\n def pushStorageModelToDatabase(self):\n \"\"\"\n Function for pushing storage data to the database.\n \"\"\"\n self.storage_view.model().pushDataToDatabase()\n self.storage_view.resizeColumnsToContents()\n self.updateDatabaseModel()\n\n def saveDataRowID(self, index):\n \"\"\"\n Function for saving the database id of the clicked data row\n \"\"\"\n if index.isValid():\n #modifiers = QApplication.keyboardModifiers()\n 
#if modifiers == Qt.ControlModifier:\n self.addIdToSelection(index.sibling(index.row(), 0).data())\n self.database_view.model().sort(0)\n\n def saveStorageRowID(self, index):\n \"\"\"\n Save a storage id of a station\n \"\"\"\n if index.isValid():\n self.selected_storage_id = index.row()\n\n def addIdToSelection(self, selected_id):\n \"\"\"\n Function that needs to be overridden\n \"\"\"\n raise Exception(\"Override this function\")\n\n def updateView(self):\n \"\"\"\n Function for updating views of this tab\n \"\"\"\n self.database_view.model().updateTab()\n self.storage_view.model().updateTab()\n\n def removeSelectedFromStorage(self):\n \"\"\"\n Remove selected id from storage\n \"\"\"\n self.storage_view.model().removeSelectedFromTable(self.selected_storage_id)\n\nclass DataViewTabButtons(QWidget):\n \"\"\"\n Class for handling all dataview tab button operations\n \"\"\"\n def __init__(self, buttons):\n QWidget.__init__(self)\n self.layout = QHBoxLayout()\n self.buttons = buttons\n\n for button in buttons:\n button.setFixedWidth(100)\n self.layout.addWidget(button)\n\n self.setLayout(self.layout)\n self.layout.setAlignment(Qt.AlignRight)\n\n","sub_path":"stationtool/dataViewer/dataViewTab.py","file_name":"dataViewTab.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"419388940","text":"#!/usr/bin/env python3\n\n\"\"\"\n\n\"\"\"\nimport sys\nimport os # system functions\nimport re\n\nimport argparse\nimport _utilities as util\nimport cenc\nimport freesurfer as tic_freesurfer\nimport json \nimport labels\n\nimport nipype.interfaces.fsl as fsl\nimport nipype.interfaces.freesurfer as fs \nfrom nipype.pipeline.engine import Workflow, Node\n\nimport recon_stats \n\n#----------------------------------------------------------------------------------------------------------------------\n#\n#\n\n\ndef prepare( input_dir ):\n\n cenc_dirs = cenc.directories( input_dir )\n\n util.mkcd_dir( [ cenc_dirs['freesurfer']['input'] ])\n\n input_files = [ os.path.join( cenc_dirs['cenc']['reorient'], 't1w.nii.gz') ]\n\n# Preliminary testing of Freesurfer with the T2FLAIR and T2_TSE has not been very successful.\n# Removing them from the workflow. 
\n#\n# input_files = [ os.path.join( cenc_dirs['cenc']['reorient'], 't2flair.nii.gz'),\n# os.path.join( cenc_dirs['cenc']['reorient'], 't2tse.nii.gz')\n# ]\n\n util.link_inputs( input_files, cenc_dirs['freesurfer']['input'] )\n\n return\n\n\n#----------------------------------------------------------------------------------------------------------------------\n# Deprecated function use imcollective_freesurfer.methods_recon_all()\n#\n\ndef recon_all( input_dir ):\n\n cenc_dirs = cenc.directories( input_dir )\n\n freesurfer_command = [ 'recon-all', '-sd', cenc_dirs['cenc']['freesurfer_subjects_dir'],'-subjid', cenc_dirs['cenc']['id'],\n '-all', '-i', 't1w.nii.gz',\n '-qcache',\n '-measure', 'thickness',\n '-measure', 'curv',\n '-measure', 'sulc',\n '-measure', 'area',\n '-measure', 'jacobian_white'\n ]\n\n util.iw_subprocess( freesurfer_command, True, True, True)\n\n return\n\n\ndef results( input_dir, verbose):\n\n cenc_dirs = cenc.directories( input_dir )\n cenc_freesurfer_dir = cenc_dirs['freesurfer']['mri']\n\n util.mkcd_dir( [ cenc_dirs['results']['dirs']['root'],\n cenc_dirs['results']['dirs']['images'],\n cenc_dirs['results']['dirs']['labels'] \n ],\n True \n )\n\n files_to_convert = [ os.path.join( cenc_freesurfer_dir, 'nu.mgz'),\n os.path.join( cenc_freesurfer_dir, 'aseg.mgz'),\n os.path.join( cenc_freesurfer_dir, 'brainmask.mgz'),\n os.path.join( cenc_freesurfer_dir, 'aparc.a2009s+aseg.mgz'),\n os.path.join( cenc_freesurfer_dir, 'wmparc.mgz')\n ]\n\n\n # Check if files exist\n\n if util.check_files(files_to_convert, True) == False:\n sys.exit()\n\n # Create link to directory\n\n if not os.path.exists(cenc_dirs['freesurfer']['results']):\n util.force_symbolic_link( cenc_dirs['freesurfer']['mri'], cenc_dirs['freesurfer']['results'])\n\n # TODO use input node to run this instead of a loop. The trick part is to have the files named correctly. \n\n for ii in files_to_convert:\n mc = fs.MRIConvert( in_file = ii,\n out_file = os.path.join( cenc_dirs['results']['dirs']['labels'],\n str.replace( os.path.basename(ii),'.mgz','.nii.gz')),\n out_type = 'niigz'\n )\n mc.run()\n\n \n reorient = fsl.Reorient2Std( in_file = mc.inputs.out_file, out_file = mc.inputs.out_file)\n reorient.run()\n\n # Link nu.nii.gz to results/native/images\n\n result_files = [ [ os.path.join( cenc_dirs['results']['dirs']['labels'], 'nu.nii.gz'), \n os.path.join( cenc_dirs['results']['dirs']['images'], 'nu.nii.gz') ]\n ]\n \n for ii in result_files:\n util.force_hard_link( ii[0], ii[1])\n\n \n # Create final brain mask. 
\n\n cenc.create_mask( os.path.join( cenc_dirs['results']['dirs']['labels'], 'brainmask.nii.gz'), \n os.path.join( cenc_dirs['results']['dirs']['labels'], 'aparc.a2009s+aseg.nii.gz'), \n os.path.join( cenc_dirs['results']['dirs']['labels'], 'mask.nii.gz')\n )\n\n\n # Create macroscopic labels\n\n labels.extract(os.path.join( cenc_dirs['results']['dirs']['labels'], 'aseg.nii.gz'), [], \n [10, 11, 12, 13, 17, 18, 49, 50, 51, 52, 53, 54], \n os.path.join( cenc_dirs['results']['dirs']['labels'],'gm.subcortical.nii.gz' ), merge=1)\n \n labels.extract(os.path.join( cenc_dirs['results']['dirs']['labels'], 'aseg.nii.gz'), [], [3,42], \n os.path.join( cenc_dirs['results']['dirs']['labels'],'gm.cerebral_cortex.nii.gz' ), merge=1)\n\n labels.extract(os.path.join( cenc_dirs['results']['dirs']['labels'], 'aseg.nii.gz'), [], [8, 47], \n os.path.join( cenc_dirs['results']['dirs']['labels'],'gm.cerebellum.nii.gz'), merge=1)\n\n labels.extract(os.path.join( cenc_dirs['results']['dirs']['labels'], 'aseg.nii.gz'), [], [2, 41], \n os.path.join( cenc_dirs['results']['dirs']['labels'],'wm.cerebral.nii.gz'), merge=1)\n\n labels.extract(os.path.join( cenc_dirs['results']['dirs']['labels'], 'aseg.nii.gz'), [], [7,46], \n os.path.join( cenc_dirs['results']['dirs']['labels'],'wm.cerebellum.nii.gz'), merge=1)\n\n labels.extract(os.path.join( cenc_dirs['results']['dirs']['labels'], 'aseg.nii.gz'), [], [4,43], \n os.path.join( cenc_dirs['results']['dirs']['labels'],'ventricles.nii.gz'), merge=1)\n\n # Brain extraction nu.nii.gz\n util.iw_subprocess(['fslmaths', \n os.path.join( cenc_dirs['results']['dirs']['images'], 'nu.nii.gz'),\n '-mas', \n os.path.join( cenc_dirs['results']['dirs']['labels'], 'mask.nii.gz'),\n os.path.join( cenc_dirs['results']['dirs']['images'], 'nu_brain.nii.gz')])\n\n\n\ndef status_run( input_dir, verbose ):\n\n cenc_dirs = cenc.directories( input_dir )\n\n result_files = [ os.path.join( cenc_dirs['freesurfer']['mri'], 'wmparc.mgz') ]\n freesurfer_status_run = util.check_files(result_files, False)\n\n if verbose:\n print( cenc_dirs['cenc']['id'] + ', cenc_freesurfer, run, ' + str(freesurfer_status_run) )\n\n return freesurfer_status_run\n\n\ndef status_results( input_dir, verbose ):\n\n cenc_dirs = cenc.directories( input_dir )\n\n result_files = [ os.path.join( cenc_dirs['results']['dirs']['labels'], 'gm.cerebral_cortex.nii.gz' ),\n os.path.join( cenc_dirs['results']['dirs']['labels'], 'gm.cerebellum.nii.gz'),\n os.path.join( cenc_dirs['results']['dirs']['labels'], 'wm.cerebral.nii.gz' ),\n os.path.join( cenc_dirs['results']['dirs']['labels'], 'wm.cerebellum.nii.gz' ),\n os.path.join( cenc_dirs['results']['dirs']['labels'], 'ventricles.nii.gz' ),\n os.path.join( cenc_dirs['results']['dirs']['labels'], 'nu.nii.gz' ),\n os.path.join( cenc_dirs['results']['dirs']['images'], 'nu.nii.gz' ),\n os.path.join( cenc_dirs['results']['dirs']['images'], 'nu_brain.nii.gz' )\n ]\n\n freesurfer_status_results = util.check_files(result_files, False)\n\n if verbose:\n print( cenc_dirs['cenc']['id'] + ', cenc_freesurfer, results, ' + str(freesurfer_status_results) )\n\n return freesurfer_status_results\n\n\ndef main():\n ## Parsing Arguments\n #\n #\n\n usage = \"usage: %prog [options] arg1 arg2\"\n\n parser = argparse.ArgumentParser(prog='cenc_freesurfer')\n\n parser.add_argument('-i', \"--in_dir\", help=\"Participant directory\", default=os.getcwd())\n\n parser.add_argument(\"--prepare\", help=\"Gather necessary inputs for Freesurfer analysis\", action=\"store_true\",\n default=False)\n 
parser.add_argument(\"--methods\", help=\"Freesurfer methods\", nargs='*', choices=['recon-all', 'edit_pial'],\n default=[None])\n\n parser.add_argument(\"--qm\", help=\"QA methods\", nargs='*', choices=['recon-all', 'edit_pial'],\n default=[None])\n\n parser.add_argument(\"--results\", help=\"Gather Freesurfer results\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--results_force\", help=\"Gather Freesurfer results\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--redcap\", help=\"Upload Freesurfer results to Red Cap\", action=\"store_true\", default=False)\n\n parser.add_argument(\"--status\", help=\"Status check. choices=['run', 'results']\", nargs='*',\n choices=['results', 'run', 'all'], default=[None])\n\n # parser.add_argument(\"--status\", help=\"Check Freesurfer status\", action=\"store_true\", default=False )\n # parser.add_argument(\"--status_run\", help=\"Check Freesurfer run status\", action=\"store_true\", default=False )\n # parser.add_argument(\"--status_results\", help=\"Check Freesurfer results status\", action=\"store_true\", default=False )\n\n parser.add_argument('-v', '--verbose', help=\"Verbose flag\", action=\"store_true\", default=False)\n parser.add_argument('--debug', help=\"Debug flag\", action=\"store_true\", default=False)\n\n inArgs = parser.parse_args()\n\n #\n\n cenc_dirs = cenc.directories(inArgs.in_dir)\n\n fs_info = tic_freesurfer.get_info(cenc_dirs['cenc']['id'],\n cenc_dirs['freesurfer']['subjects_dir'],\n cenc_dirs['freesurfer']['t1w'],\n cenc_dirs['freesurfer']['t2w'],\n cenc_dirs['freesurfer']['flair']\n )\n\n if inArgs.debug:\n print(json.dumps(fs_info, indent=4, ensure_ascii=True, sort_keys=False))\n\n #\n # Prepare\n #\n\n if inArgs.prepare:\n prepare(inArgs.in_dir)\n\n #\n # Methods\n #\n\n if 'recon-all' in inArgs.methods:\n tic_freesurfer.methods_recon_all(fs_info, inArgs.verbose)\n\n if 'edit_pial' in inArgs.methods:\n tic_freesurfer.methods_recon_edit_pial(fs_info, inArgs.verbose)\n\n #\n # QA\n #\n\n if 'mri' in inArgs.qm:\n tic_freesurfer.qa_methods_mri(fs_info, inArgs.verbose)\n\n if 'edit_pial' in inArgs.qm:\n tic_freesurfer.qa_methods_edit_pial(fs_info, inArgs.verbose)\n\n #\n # Results\n #\n\n\n if inArgs.results:\n\n if status_run(inArgs.in_dir, False):\n\n if not status_results(inArgs.in_dir, False) or inArgs.results_force:\n results(inArgs.in_dir, inArgs.verbose)\n else:\n print(cenc_dirs['cenc']['id'] + ': cenc_freesurfer.py --results has already been run')\n sys.exit()\n else:\n print(cenc_dirs['cenc']['id'] + ': cenc_freesurfer.py --run has not completed or still needs to be run')\n sys.exit()\n\n #\n # Status\n #\n\n if 'run' in inArgs.status or 'all' in inArgs.status:\n status_run(inArgs.in_dir, True)\n\n if 'results' in inArgs.status or 'all' in inArgs.status:\n status_results(inArgs.in_dir, True)\n\n\n # RedCAP\n\n if inArgs.redcap:\n s = recon_stats.Subject(cenc_dirs['cenc']['id']) # where SUBJECTID is an identifier for a subject living in SUBJECTS_DIR\n # print(dir(s))\n s.get_measures()\n # data = s.upload_dict()\n # print(data)\n\n \n\n#\n# Main Function\n#\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n","sub_path":"cenc/cenc_freesurfer.py","file_name":"cenc_freesurfer.py","file_ext":"py","file_size_in_byte":11902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270346471","text":"import numpy as np\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\ndef sigmoidPrime(x):\n return x*(1-x)\n\ndef model(layer):\n weights = []\n 
for i in range(1, len(layer)):\n w = np.random.randn(layer[i], layer[i-1])/ np.sqrt(layer[i-1])\n weights.append(w)\n \n bias = []\n for i in range(1, len(layer)):\n bias.append(np.zeros((1, layer[i])))\n return weights, bias\n\ndef feedforward(x, weights, bias):\n a_collection = [x]\n a = x\n for i in range(len(weights)-1):\n z = np.dot(a, weights[i].T) + bias[i]\n a = sigmoid(z)\n a_collection.append(a)\n a_collection.append(np.dot(a, weights[-1].T) + bias[-1])\n\n return a_collection\n\ndef backpropagation(y, vw_old_collection, vb_old_collection, a_collection, weights, bias, alpha=1, learning_rate=1, l2_lambda=0):\n error = y - a_collection[-1]\n delta_collection = [error]\n \n for i in range(len(a_collection)-2, 0, -1):\n delta = np.dot(delta_collection[-1], weights[i])*sigmoidPrime(a_collection[i])\n delta_collection.append(delta)\n \n delta_collection.reverse()\n\n vw_collection = []\n vb_collection = []\n for i in range(len(weights)):\n gradw = np.dot(delta_collection[i].T, a_collection[i])\n gradb = np.sum(delta_collection[i], axis=0)\n vw = alpha*vw_old_collection[i] + learning_rate*gradw\n vb = alpha*vb_old_collection[i] + learning_rate*gradb\n vw_collection.append(vw)\n vb_collection.append(vb)\n\n weights[i] += vw + l2_lambda*weights[i]\n bias[i] += vb\n return weights, bias, vw_collection, vb_collection\n\ndef loss(y, pred):\n return np.mean(np.square(y - pred))","sub_path":"nn_momentum2.py","file_name":"nn_momentum2.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226204658","text":"from alife.util.general import load_obj\nimport sys\nimport os\nimport csv\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python {} \".format(sys.argv[0]))\n else:\n infn = sys.argv[1]\n inbase = os.path.basename(infn)\n outfn = inbase.split('.')[0] + '.csv'\n\n gpes = load_obj(infn)\n with open(outfn, 'wb') as outfile:\n writer = csv.writer(outfile)\n writer.writerow(['trait', 'time_step', 't1', 't2', 't3', 'total'])\n for trait, series in gpes.items():\n for step,term_list in enumerate(series):\n writer.writerow([trait, step]+list(term_list))\n \n\n","sub_path":"alife/traits/gpes_to_csv.py","file_name":"gpes_to_csv.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364821995","text":"import urllib.request\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport math\nimport numpy as np\nimport datetime\nfrom geopy.geocoders import Nominatim\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nfrom colorsys import hsv_to_rgb\nfrom matplotlib.colors import rgb2hex\nSEGMENTS = 100\n\n# draw plots inline rather than in a seperate window\n# %matplotlib inline\n# draw plots bigger\nplt.rcParams[\"figure.figsize\"] = [20.0, 10.0]\n\nbot_user='BioPathBot'\npassw='chkiroju'\nbaseurl='http://wikipast.epfl.ch/wikipast/'\nsummary='Wikipastbot update'\nprotected_logins=[\"Frederickaplan\",\"Maud\",\"Vbuntinx\",\"Testbot\",\"IB\",\"SourceBot\",\"PageUpdaterBot\",\"Orthobot\",\"BioPathBot\",\"ChronoBOT\",\"Amonbaro\",\"AntoineL\",\"AntoniasBanderos\",\"Arnau\",\"Arnaudpannatier\",\"Aureliver\",\"Brunowicht\",\"Burgerpop\",\"Cedricviaccoz\",\"Christophe\",\"Claudioloureiro\",\"Ghislain\",\"Gregoire3245\",\"Hirtg\",\"Houssm\",\"Icebaker\",\"JenniCin\",\"JiggyQ\",\"JulienB\",\"Kl\",\"Kperrard\",\"Leandro 
Kieliger\",\"Marcus\",\"Martin\",\"MatteoGiorla\",\"Mireille\",\"Mj2905\",\"Musluoglucem\",\"Nacho\",\"Nameless\",\"Nawel\",\"O'showa\",\"PA\",\"Qantik\",\"QuentinB\",\"Raphael.barman\",\"Roblan11\",\"Romain Fournier\",\"Sbaaa\",\"Snus\",\"Sonia\",\"Tboyer\",\"Thierry\",\"Titi\",\"Vlaedr\",\"Wanda\"]\ndepuis_date='2016-05-02T16:00:00Z'\nliste_pages=[]\nfor user in protected_logins:\n result=requests.post(baseurl+'api.php?action=query&list=usercontribs&ucuser='+user+'&format=xml&ucend='+depuis_date)\n soup=BeautifulSoup(result.content,'lxml')\n for primitive in soup.usercontribs.findAll('item'):\n title = primitive['title']\n if 'Fichier' not in title and 'BioPathBot' not in title:\n liste_pages.append(primitive['title'])\n\nnames=list(set(liste_pages))\n\"\"\"for title in names:\n print(title)\"\"\"\n\n# Login request\npayload={'action':'query','format':'json','utf8':'','meta':'tokens','type':'login'}\nr1=requests.post(baseurl + 'api.php', data=payload)\n\n#login confirm\nlogin_token=r1.json()['query']['tokens']['logintoken']\npayload={'action':'login','format':'json','utf8':'','lgname':bot_user,'lgpassword':passw,'lgtoken':login_token}\nr2=requests.post(baseurl + 'api.php', data=payload, cookies=r1.cookies)\n\n#get edit token2\nparams3='?format=json&action=query&meta=tokens&continue='\nr3=requests.get(baseurl + 'api.php' + params3, cookies=r2.cookies)\nedit_token=r3.json()['query']['tokens']['csrftoken']\n\nedit_cookie=r2.cookies.copy()\nedit_cookie.update(r3.cookies)\n\n#setup geolocator\ngeolocator = Nominatim(timeout=30)\n\n\n# upload config\ndef uploadMap(filename):\n\n # read local file\n upload_file = open(filename,\"rb\")\n upload_contents = upload_file.read()\n upload_file.close()\n\n # setting parameters for upload\n # ref: https://www.mediawiki.org/wiki/API:Upload\n payload={'action':'upload','filename':filename, 'ignorewarnings':1, 'token':edit_token}\n files={'file':upload_contents}\n\n # upload the image\n print(\"Uploading file to %s via API...\" % (baseurl+\"index.php/Fichier:\"+filename))\n r4=requests.post(baseurl+'api.php',data=payload,files=files,cookies=edit_cookie)\n\n # in case of error print the response\n # print(r4.text)\n\n# add link to biopath in original page if not already existing\ndef addLinkToOriginalPage(name):\n\n result=requests.post(baseurl+'api.php?action=query&titles='+name+'&export&exportnowrap')\n soup=BeautifulSoup(result.text, \"lxml\")\n #soup=BeautifulSoup(result.text)\n code=''\n for primitive in soup.findAll(\"text\"):\n code+=primitive.string\n\n exist = re.findall(\"(\\[\\[\"+name+\" BioPathBot\\]\\])\",code)\n if(len(exist)==0):\n title = name\n content = \"\\n\\n\"+\"[[\"+name+\" BioPathBot]]\"\n requests.post(baseurl+'api.php?action=query&titles='+title+'&export&exportnowrap')\n payload={'action':'edit','assert':'user','format':'json','utf8':'','appendtext':content,'summary':summary,'title':title,'token':edit_token}\n r4=requests.post(baseurl+'api.php',data=payload,cookies=edit_cookie)\n\naddLinkToOriginalPage(\"Jean Tinguely\")\n\ndef addToPage(name, img):\n title = name + \" BioPathBot\"\n content = \"[[Fichier: \"+ img +\"]]\"\n pageToChange = requests.post(baseurl+'api.php?action=query&titles='+title+'&export&exportnowrap')\n payload={'action':'edit','assert':'user','format':'json','utf8':'','text':content,'summary':summary,'title':title,'token':edit_token}\n r4=requests.post(baseurl+'api.php',data=payload,cookies=edit_cookie)\n print(r4.text)\n\n# BioPathBot : add line of databiographie to the right page (time and space)\ndef 
getDataFromPage(name):\n data = []\n dates = []\n places = []\n print(\"Page Created: \" + name)\n result=requests.post(baseurl+'api.php?action=query&titles='+name+'&export&exportnowrap')\n soup=BeautifulSoup(result.text, \"lxml\")\n #soup=BeautifulSoup(result.text)\n code=''\n for primitive in soup.findAll(\"text\"):\n if primitive.string:\n code+=primitive.string\n\n # split on list (*)\n lines = code.split(\"*\")\n for line in lines :\n\n # add breaking lines (otherwise will be appened directly in one line)\n line = \"\\n\\n\"+line\n\n # get date if exist\n date = re.findall(\"((?<=\\[\\[)\\d*(\\.*\\d*\\.*\\d*)*(?=\\]\\]))\",line)\n dateToAdd = \"\"\n\n if len(date) != 0 :\n dateToAdd = date[0][0]\n dates.append(dateToAdd)\n\n # get place if exist\n place = re.findall(\"(?<=\\/\\s\\[\\[)[A-zÀ-ÿ\\s]*(?=\\]\\])\",line)\n if(len(place)==0):\n place = re.findall(\"(?<=\\/\\[\\[)[A-zÀ-ÿ\\s]*(?=\\]\\])\",line)\n location = \"\"\n if len(place) != 0:\n placeToAdd = place[0]\n places.append(placeToAdd)\n location = geolocator.geocode(placeToAdd)\n if location:\n print(\"Location: \" + placeToAdd + \" : \" + str(location.longitude) + \",\" + str(location.latitude))\n\n # if both the date and the location are available, append in data array\n if dateToAdd and location:\n dataToAdd = [location.longitude,location.latitude];\n data.append(dataToAdd);\n\n # stop getting data if find [[Décès]]\n foundDeces = re.findall(\"(\\[\\[Décès*\\]\\] de \\[\\[\"+name+\")\",line)\n if(len(foundDeces) != 0):\n break\n return [data, dates, places]\n\n# finds the minimal and maximal longitude and latitude\ndef findCorners(pts):\n minlon = maxlon = pts[0][0]\n minlat = maxlat = pts[0][1]\n for p in pts:\n currlon = p[0]\n if currlonmaxlon:\n maxlon = currlon\n\n currlat = p[1]\n if currlatmaxlat:\n maxlat = currlat\n\n return [minlon, maxlon, minlat, maxlat]\n\n# draws the map, some points and the lines\ndef drawmap(pts, dates, places, filename, export=False):\n n_pts = len(pts)\n corners = findCorners(pts)\n txt = \"\"\n m = Basemap(llcrnrlon=corners[0]-1, llcrnrlat=corners[2]-1, urcrnrlon=corners[1]+1, urcrnrlat=corners[3]+1, resolution='i')\n m.drawmapboundary(fill_color='0.6')\n m.drawcountries(linewidth=1.0, color='0.6')\n m.fillcontinents(color='white', lake_color='white')\n for i in range(n_pts-1): # draw lines\n for j in range(SEGMENTS):\n start = pts[i] + (pts[i+1]-pts[i])*(j/SEGMENTS)\n end = pts[i] + (pts[i+1]-pts[i])*((j+1)/SEGMENTS)\n m.plot([start[0], end[0]], [start[1], end[1]], color=hsv_to_rgb((i+j/SEGMENTS)/n_pts, 1, 1))\n for i in range(n_pts): # draw points\n curr_color = hsv_to_rgb(i/n_pts, 1, 1)\n m.plot(pts[i][0], pts[i][1], marker='o', color=curr_color, fillstyle='full', markeredgewidth=0.0)\n txt += \"\" + dates[i] + \" / \" + places[i] + \".
\"\n if export:\n plt.savefig(filename, bbox_inches='tight')\n # plt.show()\n return txt\nnames = [\"Franz Beckenbauer\"]\nfor name in names:\n image_filename = (name + \"_biopath.png\").replace(\" \",\"_\")\n data = getDataFromPage(name)\n print(data)\n if len(data[0]) != 0:\n legend = drawmap(np.array(data[0]), data[1], data[2], image_filename, True)\n uploadMap(image_filename)\n addToPage(name, image_filename)\n addLinkToOriginalPage(name)\n","sub_path":"biopathbot2.py","file_name":"biopathbot2.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560149098","text":"import numpy as np\nimport cv2\nfrom sklearn.preprocessing import normalize\n\ndef display_OpenCV_image(cv2Img):\n #For display image with one line code.\n cv2.imshow('My Image', cvImg)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n imgL = cv2.imread('./StereoMatchingTestings/Art/view1.png')\n imgR = cv2.imread('./StereoMatchingTestings/Art/view5.png')\n\n imgL_new = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)\n imgR_new = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)\n\n #display_OpenCV_image(imgL)\n #display_OpenCV_image(imgR)\n #display_OpenCV_image(imgL_new)\n #display_OpenCV_image(imgR_new)\n\n # SGBM Parameters -----------------\n window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n\n left_matcher = cv2.StereoSGBM_create(\n minDisparity=0,\n numDisparities=160, # max_disp has to be dividable by 16 f. E. HH 192, 256\n blockSize=5,\n P1=8 * 3 * window_size ** 2,\n # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely\n P2=32 * 3 * window_size ** 2,\n disp12MaxDiff=1,\n uniquenessRatio=15,\n speckleWindowSize=0,\n speckleRange=2,\n preFilterCap=63,\n mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY\n )\n\n right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)\n\n # FILTER Parameters\n lmbda = 80000\n sigma = 1.2\n visual_multiplier = 1.0\n\n wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)\n wls_filter.setLambda(lmbda)\n wls_filter.setSigmaColor(sigma)\n\n print('computing disparity...')\n displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16\n dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16\n displ = np.int16(displ)\n dispr = np.int16(dispr)\n filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put \"imgL\" here!!!\n\n filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);\n filteredImg = np.uint8(filteredImg)\n cv2.imshow('Disparity Map', filteredImg)\n cv2.waitKey()\n cv2.destroyAllWindows()\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224842256","text":"# IMPORTING IMPORTANT LIBRARIES\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport math\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation\nfrom keras.layers import LSTM\nimport preprocessing \nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n# FOR REPRODUCIBILITY\nnp.random.seed(7)\n\n# IMPORTING DATASET \ndataset = 
pd.read_csv('../Data/apple_share_price.csv', usecols=[1,2,3,4])\n# print(dataset.head())\n# print(dataset.index[::-1])\ndataset = dataset.reindex(index = dataset.index[::-1])\n# print(dataset.head())\n\n\n# CREATING OWN INDEX FOR FLEXIBILITY\nobs = np.arange(1, len(dataset) + 1, 1)\n\n# TAKING DIFFERENT INDICATORS FOR PREDICTION\nOHLC_avg = dataset.mean(axis = 1)\n# OHLC_avg = dataset\n# print(OHLC_avg)\n\n# HLC_avg = dataset[['High', 'Low', 'Close']].mean(axis = 1)\n# print(HLC_avg)\n# close_val = dataset[['Close']]\n# print(close_val.head())\n# exit(0)\n\n\n# PLOTTING ALL INDICATORS IN ONE PLOT\n# plt.plot(obs, OHLC_avg, 'r', label = 'OHLC avg')\n# plt.plot(obs, HLC_avg, 'b', label = 'HLC avg')\n# plt.plot(obs, close_val, 'g', label = 'Closing price')\n# plt.legend(loc = 'upper right')\n# plt.show()\n\n#Augmented Dickey Fuller Test for Stationarity\n\nfig,axes = plt.subplots(2,2,sharex=True)\nOHLC_avg\naxes[0, 0].plot(obs,OHLC_avg); axes[0, 0].set_title('Original Series')\nplot_acf(OHLC_avg, ax=axes[0,1],lags=100)\n# result = adfuller(OHLC_avg.values)\n# print('ADF Statistics: %f' % result[0])\n# print('p-value: %f' % result[1])\n\nb = obs[1:]\n\naxes[1, 0].plot(obs,OHLC_avg.diff()); axes[1, 0].set_title('Diff Series')\nplot_acf(OHLC_avg.diff(), ax=axes[1,1], lags=100)\nplt.show()\n\n# First order difference\n# print(type(OHLC_avg))\nOHLC_avg = np.reshape(OHLC_avg.values, (len(OHLC_avg),1)) # 1664\ndiff = []\nfor i in range(OHLC_avg.shape[0]-1):\n diff.append(OHLC_avg[i+1]-OHLC_avg[i])\n\ndiff = np.asarray(diff)\ndiff = diff.reshape((diff.shape[0],))\n# print(diff.shape)\nresult = adfuller(diff)\nprint('ADF Statistics: %f' % result[0])\nprint('p-value: %f' % result[1])\nexit(0)\n\n# plt.plot(obs, OHLC_avg, 'r', label = 'OHLC avg')\nplt.plot(b, diff, 'b', label = 'diff')\nplt.show()\nexit(0)\n# print(OHLC_avg)\nscaler = MinMaxScaler(feature_range=(0, 1))\nOHLC_avg = scaler.fit_transform(OHLC_avg)\n# print(OHLC_avg)\n\n","sub_path":"AR_Model/AR_Stock_Price_Prediction.py","file_name":"AR_Stock_Price_Prediction.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523349835","text":"#! 
/usr/bin/env python3\n\nimport time\nimport random\nimport os\n\n\nclass Unit:\n    \"\"\"\n    Abstract class with basic unit methods.\n\n    :arg health, :type int\n    :arg recharge, :type int\n    \"\"\"\n    last_attack_time = 0\n    active = True\n\n    def __init__(self, **kwargs):\n        self.health = kwargs['health']\n        # divide by more than 1k so the battle result comes faster;\n        # the recharge ratios between the units stay the same anyway\n        self.recharge = kwargs['recharge'] / 10000\n\n    def take_damage(self, dmg):\n        \"\"\"\n        Decreases self.health by dmg and changes status of unit if dead.\n\n        :param dmg: taken damage.\n        \"\"\"\n        self.health -= dmg\n        if self.health <= 0:\n            self.active = False\n\n    def get_recharge(self):\n        \"\"\"\n        :return: leftover recharge.\n        \"\"\"\n        return self.recharge - (time.time() - self.last_attack_time)\n\n    def do_attack(self):\n        \"\"\"\n        Checks if unit can attack, updates last attack time.\n\n        :return: True if attack successful, False if not.\n        \"\"\"\n        if self.get_recharge() <= 0 and self.active is True:\n            self.last_attack_time = time.time()\n            return True\n        else:\n            return False\n\n    def get_health(self):\n        \"\"\"\n        :return: health of unit.\n        \"\"\"\n        return self.health\n\n\nclass Soldier(Unit):\n    \"\"\"\n    Generates Soldier object.\n    \"\"\"\n    experience = 0\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n    def get_damage(self):\n        \"\"\"\n        Calculates damage, updates exp.\n        \"\"\"\n        if self.do_attack() is True:\n            damage = (0.05 + self.experience / 100) * self.get_success()\n            self.add_exp()\n            return damage\n        else:\n            return 0\n\n    def add_exp(self):\n        \"\"\"If exp = 50 does nothing\"\"\"\n        if self.experience < 50:\n            self.experience += 1\n\n    def get_success(self):\n        \"\"\"\n        Attack success formula\n        \"\"\"\n        return 0.5 * (1 + self.health / 100) * \\\n               random.randint(50 + self.experience, 100) / 100\n\n\nclass Vehicle(Unit):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.operators = kwargs['operators']\n\n    def get_damage(self):\n        if self.do_attack() is True:\n            damage = self.get_success()\n            for operator in self.operators:\n                operator.add_exp()\n            return damage\n        else:\n            return 0\n\n    def take_damage(self, dmg):\n        # armor is weakened 10x;\n        # otherwise, once the operators become too experienced,\n        # the vehicle becomes almost invulnerable\n        armor = (0.1 + sum(operator.experience for operator in self.operators) / 100) / 10\n        total_damage = 0\n        if dmg > armor:\n            total_damage = dmg - armor\n        self.health -= total_damage\n        for operator in self.operators:\n            operator.take_damage(total_damage * 0.1)\n        if self.operators:\n            random.choice(self.operators).take_damage(total_damage * 0.1)\n\n        self.operator_cleanup()\n        if self.health <= 0 or (bool(self.operators) is False):\n            self.active = False\n\n    def operator_cleanup(self):\n        for operator in self.operators:\n            if operator.active is False:\n                self.operators.remove(operator)\n\n    def get_success(self):\n        try:\n            return 0.5 * (1 + self.health / 100) * \\\n                   sum(operator.get_success() for operator in self.operators) / len(self.operators)\n        except ZeroDivisionError:\n            return 0\n\n\nclass Squad:\n    active = True\n\n    def __init__(self, **kwargs):\n        self.units = kwargs['units']\n\n    def get_power(self):\n        return sum(unit.health for unit in self.units)\n\n    def get_damage(self):\n        damage = sum(unit.get_damage() for unit in self.units)\n        return damage\n\n    def take_damage(self, dmg):\n        if len(self.units):\n            personal_damage = dmg / len(self.units)\n            for unit in self.units:\n                unit.take_damage(personal_damage)\n        self.unit_cleanup()\n        if not self.units:\n            self.active = False\n\n    def unit_cleanup(self):\n        for unit in self.units:\n            if unit.active is False:\n                self.units.remove(unit)\n\n    def get_success(self):\n        return sum(unit.get_success() for unit in self.units)\n\n\nclass Army:\n    attacks_num = 0\n\n    def __init__(self, **kwargs):\n        self.squads = kwargs['squads']\n        self.strategy = kwargs['strategy']\n        self.name = kwargs['name']\n\n    def attack(self, enemy):\n        if enemy.squads:\n            if self.squads:\n                if self.strategy == 'random':\n                    target = random.choice(enemy.squads)\n                    attacker = random.choice(self.squads)\n                    if attacker.get_success() > target.get_success():\n                        target.take_damage(attacker.get_damage())\n                        self.attacks_num += 1\n                elif self.strategy == 'weakest':\n                    enemy.squads.sort(key=lambda squad: squad.get_power())\n                    target = enemy.squads[0]\n                    attacker = random.choice(self.squads)\n                    if attacker.get_success() >= target.get_success():\n                        target.take_damage(attacker.get_damage())\n                        self.attacks_num += 1\n                elif self.strategy == 'strongest':\n                    enemy.squads.sort(key=lambda squad: squad.get_power(), reverse=True)\n                    target = enemy.squads[0]\n                    attacker = random.choice(self.squads)\n                    if attacker.get_success() > target.get_success():\n                        target.take_damage(attacker.get_damage())\n                        self.attacks_num += 1\n\n            enemy.squad_cleanup()\n\n    def squad_cleanup(self):\n        for squad in self.squads:\n            if bool(squad.units) is False:\n                self.squads.remove(squad)\n\n    def army_stats(self):\n        stats = {}\n        stats['name'] = self.name\n        stats['power'] = round(sum(squad.get_power() for squad in self.squads), 2)\n        stats['squads'] = len(self.squads)\n        stats['soldiers'] = 0\n        stats['vehicles'] = 0\n        stats['attacks'] = self.attacks_num\n        for squad in self.squads:\n            for unit in squad.units:\n                if unit.__class__.__name__ == 'Soldier':\n                    stats['soldiers'] += 1\n                if unit.__class__.__name__ == 'Vehicle':\n                    stats['vehicles'] += 1\n        return stats\n\n\nclass Battlefield:\n    last_console_update = time.time()\n    battlefield_status = []\n\n    def __init__(self, **kwargs):\n        self.armies = kwargs['armies']\n        self.debug = kwargs['debug']\n\n    def start(self):\n        while True:\n            for army in self.armies:\n                while True:\n                    target = random.choice(self.armies)\n                    if target != army:\n                        break\n                army.attack(target)\n            self.army_cleanup()\n            if self.debug == 'on':\n                self.print_stats(self.armies)\n            if len(self.armies) == 1:\n                return self.armies[0].name + ' won!'\n\n    def army_cleanup(self):\n        for army in self.armies:\n            if bool(army.squads) is False:\n                self.armies.remove(army)\n\n    def print_stats(self, armies):\n        if time.time() - self.last_console_update > 0.3:\n            os.system('cls' if os.name == 'nt' else 'clear')\n            self.last_console_update = time.time()\n            for army in armies:\n                print(army.name, 'army stats:')\n                print('  Total power: {0}, squads: {1}, soldiers: {2}, vehicles: {3}, attacks: {4}'.format(\n                    army.army_stats()['power'],\n                    army.army_stats()['squads'],\n                    army.army_stats()['soldiers'],\n                    army.army_stats()['vehicles'],\n                    army.army_stats()['attacks']\n                ))\n","sub_path":"web_battle/battle.py","file_name":"battle.py","file_ext":"py","file_size_in_byte":8373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"244855488","text":"#!/usr/bin/env python\n# vim:fileencoding=utf-8\n# from __future__ import unicode_literals\n#\nfrom django.contrib.auth.models import User, Group, Permission\nfrom django.core.management.base import BaseCommand,CommandError\n\nfrom app1 import *\n\n\nclass Command(BaseCommand):\n    args = 'Model'\n    help = 'JSON output of the fields of a table'\n\n    def 
add_arguments(self, parser):\n        parser.add_argument('mod')\n\n\n    def handle(self, *args, **options):\n\n        objekte=[f.name for f in eval(options['mod'].title())._meta.get_fields()]\n\n        if objekte:\n            print (objekte)\n        else:\n            raise CommandError(options['mod'] + \" does not exist\")","sub_path":"management/commands/felder.py","file_name":"felder.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"586112752","text":"from arcpy import GetParameterAsText\r\nfrom esri2geo import prepareGeoJson,writeFile,closeUp\r\nfeatures = GetParameterAsText(0).split(\";\")\r\noutJSON=GetParameterAsText(1)\r\nincludeGeometry = True\r\nfileType = \"geojson\"\r\nout=open(outJSON,\"wb\")\r\nprepareGeoJson(out)\r\nfirst=True\r\nfor feature in features:\r\n    if feature[0] in (\"'\",'\"'):\r\n        feature = feature[1:-1]\r\n    writeFile(out,feature,fileType,includeGeometry, first)\r\n    first=False\r\ncloseUp(out,fileType)","sub_path":"merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"397308000","text":"# source code\r\nimport pandas as pd\r\nfrom sklearn import datasets\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndata = datasets.load_boston()\r\ndfX = pd.DataFrame(data.data, columns=data.feature_names)\r\ndfX = dfX.drop(['CHAS', 'RAD'], axis=1)\r\ndfY = pd.DataFrame(data.target, columns=['MEDV'])\r\nregr = LinearRegression()\r\n\r\n##########################################################################\r\n# practice 2\r\nimport numpy as np\r\nfrom sklearn.preprocessing import StandardScaler, scale, robust_scale\r\nimport seaborn as sns\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n# 1.check data\r\ndfX.head() # eye-check\r\ndfY.head() # eye-check\r\n#dfX.info() # data type\r\ndfX.describe() # range, outlier\r\ndfY.describe() # range, outlier\r\n\r\n# 2. missing values\r\ndfX.isnull().sum() # none\r\ndfY.isnull().sum() # none\r\n\r\n# 3. outlier\r\ndef std_based_outlier(dfX, dfY):\r\n    b = np.full(len(dfX), False)\r\n    for i in range(0, len(dfX.iloc[1])):\r\n        b |= (np.abs(dfX.iloc[:,i] - dfX.iloc[:,i].mean()) > 3 *dfX.iloc[:,i].std())\r\n    return dfX[~b], dfY[~b]\r\n\r\ndfX, dfY = std_based_outlier(dfX, dfY)\r\n\r\n# 4. 
scaling (log or sqrt)\r\ndef plot_functions(df):\r\n sns.kdeplot(np.random.randn(len(df)), label='z-dist')\r\n sns.kdeplot(scale(df), label='normal')\r\n sns.kdeplot(scale(np.log(df)), label='log')\r\n sns.kdeplot(scale(np.sqrt(df)), label='sqrt')\r\n sns.kdeplot(scale(np.power(df,2)), label='x^2')\r\n sns.kdeplot(scale(np.power(df,3)), label='x^3')\r\n sns.kdeplot(robust_scale(df), label='robust')\r\n\r\nplot_functions(dfX['AGE'])\r\n#dfX['ZN'] = scale(np.power(dfX['ZN'], 1/3))\r\n\r\n# 74.8\r\ndfX['CRIM'] = scale(np.log(dfX['CRIM']))\r\ndfX['RM'] = scale(np.power(dfX['RM'],3))\r\ndfX['LSTAT'] = scale(np.log(dfX['LSTAT']))\r\n\r\ndfX.describe()\r\n\r\n##########################################################################\r\n\r\nn = 1000\r\navg = 0\r\nfor i in range(n):\r\n X_train, X_test, y_train, y_test = train_test_split(dfX, dfY, train_size=0.7)\r\n regr.fit(X_train, y_train)\r\n avg+=regr.score(X_test, y_test)\r\nprint(avg/n)","sub_path":"190817_team_05_hw3_A_2.py","file_name":"190817_team_05_hw3_A_2.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254625230","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom utils.config import cfg\n\nclass RNN(nn.Module):\n def __init__(self, input_size=80, batch_size=64, hidden_size=20, num_classes=1):\n super(RNN,self).__init__()\n # self.fc1 = nn.Linear(input_size, hidden_size)\n # self.fc2 = nn.Linear(hidden_size,hidden_size)\n #self.dropout_1 = nn.Dropout(p=0.2)\n #self.dropout_2 = nn.Dropout(p=0.2)\n self.num_layers = cfg.WBDNet.num_layers\n self.hidden_dim = hidden_size\n self.lstm = nn.LSTM(input_size, hidden_size, bidirectional=True,num_layers=self.num_layers,batch_first=True)\n self.fc3 = nn.Linear(hidden_size * 2,num_classes)\n self.sigm = nn.Sigmoid()\n\n def forward(self,x):\n out,hidden = self.lstm(x)\n out = self.fc3(out).squeeze()\n # out = self.sigm(out)\n return out\n \n ","sub_path":"models/WBDNet.py","file_name":"WBDNet.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590888279","text":"from django.shortcuts import render\nfrom rest_framework import viewsets, permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom .serializers import ProductSerializer\nfrom django.db.models import Q\nfrom .models import Product\n\n\ndef combineOrQuery(mainQuery, orQuery):\n mainQuery = mainQuery & orQuery\n return mainQuery\n\n\ndef makeQuery(t, f, v, mainQuery, operator):\n if (t == 'contains'):\n t = 'icontains'\n elif (t == 'equals'):\n t = 'iexact'\n elif (t == 'beginsWith'):\n t = 'istartswith'\n elif (t == 'endsWith'):\n t = 'iendswith'\n elif (t == 'greaterThanEqual'):\n t = 'gte'\n elif (t == 'lessThanEqual'):\n t = 'lte'\n elif (t == 'greaterThan'):\n t = 'gt'\n elif (t == 'lessThan'):\n t = 'lt'\n elif (t == 'matches'):\n t = 'regex'\n if v != \"\":\n kwargs = {str('%s__%s' % (f, t)): str('%s' % v)}\n if operator == \"and\":\n mainQuery = mainQuery & Q(**kwargs)\n elif operator == \"or\":\n mainQuery = mainQuery | Q(**kwargs)\n\n return mainQuery\n\n\nclass ProductViewSet(viewsets.ModelViewSet):\n queryset = Product.objects.all()\n serializer_class = ProductSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly]\n\n @action(detail=False, methods=['POST'], permission_classes=[permissions.AllowAny])\n def search(self, request, *args, 
**kwargs):\n mainQuery = Q()\n\n for obj in request.data[\"data\"]:\n for key in obj:\n # or condition check\n if (key == 'or'):\n orQuery = Q()\n for newObj in obj[key]:\n for newKey in newObj:\n orQuery = makeQuery(\n newKey, newObj[newKey][0], newObj[newKey][1], orQuery, \"or\")\n mainQuery = combineOrQuery(mainQuery, orQuery)\n # or condition check end\n else:\n mainQuery = makeQuery(\n key, obj[key][0], obj[key][1], mainQuery, 'and')\n queryset = Product.objects.filter(mainQuery)\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"601448928","text":"# 神经网络样例程序\n\nimport tensorflow as tf\n\nw1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1), name='w1')\nw2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1), name='w2')\n\n# 定义 placeholder 为存数据的地方, 维度不一定需定义\n# 维度确定并给出,降低出错概率\nx = tf.placeholder(tf.float32, shape=(1,2), name='input')\n\na = tf.matmul(x, w1)\ny = tf.matmul(a, w2)\n\nwith tf.Session() as sess:\n init_op = tf.global_variables_initializer()\n sess.run(init_op)\n print(sess.run(y, feed_dict={x: [[0.7, 0.9]]}))\n\nx2 = tf.placeholder(tf.float32, shape=(3, 2), name='input2')\n\na2 = tf.matmul(x2, w1)\ny2 = tf.matmul(a2, w2)\n\nwith tf.Session() as sess:\n init_op2 = tf.global_variables_initializer()\n sess.run(init_op2)\n print(sess.run(y2, feed_dict={x2: [[0.7, 0.9], [0.1, 0.4], [0.5, 0.8]]}))","sub_path":"tf_gdl_in_action/3.4.4.py","file_name":"3.4.4.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3588727","text":"#!/usr/local/bin/python\n\nimport util, constants, detection_and_classification\nimport scipy.io as sio\n\ndataset_name = 'COIL20'\nSVHN_num_classes = 20\n\n# clf_type = ['CNN', 'CAE', 'DEF_OCSVM', 'GRID_OCSVM']\nclf_type = 'CAE'\n\n \nif __name__ == '__main__':\n \n # The data, shuffled and split between train and test sets:\n # carregando os datasets\n train_data = sio.loadmat('COIL20.mat')\n \n # importando os dados de treinamento\n x_train = train_data['fea']\n y_train = train_data['gnd']\n \n #x_train = x_train[:100]\n #y_train = y_train[:100]\n \n # pre processando os dados\n X_train, Y_train, X_test, y_test = util.pre_processing_coil20(x_train, y_train, clf_type)\n \n \n #RUNNING ANOMALY DETECTION OPERATION\n ''' \n if multi_binary_clfs == True: trains a binary_clf for each class\n if multi_binary_clfs == False: trains a clf for all class, except the anomaly class\n '''\n multi_binary_clfs = False\n batch_normalization = False\n dropout = False\n \n \n if(multi_binary_clfs):\n if (clf_type=='CAE'):\n compiled_ypreds = detection_and_classification.binary_CAE_anomaly_detection(X_train, Y_train, X_test, y_test, dataset_name, SVHN_num_classes)\n elif(clf_type=='CNN'):\n compiled_ypreds = detection_and_classification.binary_cnns_anomaly_detection(X_train, Y_train, X_test, y_test, dataset_name, SVHN_num_classes)\n else: #DEF_OCSVM, GRID_OCSVM\n compiled_ypreds = detection_and_classification.multi_ocsvm_anomaly_detection(X_train, Y_train, X_test, y_test, clf_type, dataset_name, SVHN_num_classes)\n else:\n if(clf_type=='CAE'):\n \n executions = [0, 1, 2, 3, 4, 5, 6, 7, 8]\n for i in executions:\n compiled_ypreds = detection_and_classification.CAE_anomaly_detection(X_train, Y_train, X_test, y_test, dataset_name, 
SVHN_num_classes, i)\n \n #i = 9\n #compiled_ypreds = detection_and_classification.CAE_anomaly_detection(X_train, Y_train, X_test, y_test, dataset_name, SVHN_num_classes, i)\n \n \n elif(clf_type=='CNN'):\n compiled_ypreds = detection_and_classification.multiclass_cnn_anomaly_detection(X_train, Y_train, X_test, y_test, dataset_name, SVHN_num_classes, batch_normalization, dropout)\n else: #DEF_OCSVM, GRID_OCSVM\n compiled_ypreds = detection_and_classification.OCSVM_anomaly_detection(X_train, Y_train, X_test, y_test, clf_type, dataset_name)\n \n ''' PERFORM CLASSIFICATION '''\n perform_classification = False\n if perform_classification:\n #Reshaping data to cnn appropriate form\n if ((clf_type=='DEF_OCSVM') or (clf_type=='GRID_OCSVM')):\n X_train, Y_train = util.pre_processing_CIFAR_data_without_class(x_train, y_train, constants.ANOMALY_CLASS, True)\n #Classification:\n print('\\nPERFORMING CLASSIFICATION:')\n detection_and_classification.multiclass_classification(X_train, Y_train, X_test, y_test, compiled_ypreds, dataset_name, SVHN_num_classes)\n \n ''' END ''' \n \n ","sub_path":"anomaly_COIL20.py","file_name":"anomaly_COIL20.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195084930","text":"# Write a small program to ask for a name and age.\n# When both values have been entered, check if the person\n# is the right age to go on an 18-30 holiday (they must be\n# over 18 and under 31).\n# If they are, welcome them to the holiday, otherwise print\n# a (polite) message refusing them entry.\n\nname = input(\"Enter your name: \")\nage = int(input(\"Enter your age, {}: \".format(name)))\n\nif 18 <= age < 31:\n print(\"Welcome to the holiday, {}!\".format(name))\nelse:\n print(\"Tough luck sucker!\")\n","sub_path":"06-flow-control/ifchallenge.py","file_name":"ifchallenge.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571236798","text":"# -*- coding: utf-8 -*-\nimport json\nimport uuid\nimport datetime\nimport pytz\n\n__author__ = 'viruzzz-kun'\n\n\ndef string_to_datetime(date_string, formats=None):\n # TODO: Надо разобраться с магией часовых поясов.\n if formats is None:\n formats = ('%Y-%m-%dT%H:%M:%S.%fZ', '%Y-%m-%dT%H:%M:%S+00:00', '%Y-%m-%dT%H:%M:%S.%f+00:00')\n elif not isinstance(formats, (tuple, list)):\n formats = (formats, )\n\n if date_string:\n for fmt in formats:\n try:\n dt = datetime.datetime.strptime(date_string, fmt)\n break\n except ValueError:\n continue\n else:\n raise ValueError\n return pytz.timezone('UTC').localize(dt).astimezone(tz=pytz.timezone(app.config.get('TIME_ZONE', 'Europe/Moscow'))).replace(tzinfo=None)\n else:\n return date_string\n\n\ndef safe_unicode(obj):\n if obj is None:\n return None\n return unicode(obj)\n\n\ndef safe_int(obj):\n if obj is None:\n return None\n return int(obj)\n\n\ndef safe_dict(obj):\n if obj is None:\n return None\n elif isinstance(obj, dict):\n for k, v in obj.iteritems():\n obj[k] = safe_dict(v)\n return obj\n elif hasattr(obj, '__json__'):\n return safe_dict(obj.__json__())\n return obj\n\n\ndef safe_datetime(val):\n if not val:\n return None\n if isinstance(val, basestring):\n try:\n val = string_to_datetime(val)\n except ValueError:\n try:\n val = string_to_datetime(val, '%Y-%m-%d')\n except ValueError:\n return None\n return val\n elif isinstance(val, datetime.datetime):\n return val\n elif isinstance(val, datetime.date):\n return 
datetime.datetime(val.year, val.month, val.day)\n    else:\n        return None\n\n\ndef safe_date(val):\n    if not val:\n        return None\n    if isinstance(val, basestring):\n        try:\n            val = string_to_datetime(val)\n        except ValueError:\n            try:\n                val = string_to_datetime(val, '%Y-%m-%d')\n            except ValueError:\n                return None\n        return val.date()\n    elif isinstance(val, datetime.datetime):\n        return val.date()\n    elif isinstance(val, datetime.date):\n        return val\n    else:\n        return None\n\n\ndef safe_time_as_dt(val):\n    if not val:\n        return None\n    if isinstance(val, basestring):\n        for fmt in ('%H:%M:%S', '%H:%M'):\n            try:\n                val = datetime.datetime.strptime(val, fmt)\n                break\n            except ValueError:\n                continue\n        return val\n    elif isinstance(val, datetime.datetime):\n        return val\n    else:\n        return None\n\n\ndef safe_time(val):\n    if not val:\n        return None\n    val = safe_time_as_dt(val)\n    if isinstance(val, datetime.datetime):\n        return val.time()\n    else:\n        return None\n\n\ndef safe_traverse(obj, *args, **kwargs):\n    \"\"\"Safely dig down into a nested dict\n    @param obj: entry point for the lookup\n    @param *args: keys to traverse, in order\n    @param default=None: value returned if the lookup fails\n    @rtype: any\n    \"\"\"\n    default = kwargs.get('default', None)\n    if obj is None:\n        return default\n    if len(args) == 0:\n        raise ValueError(u'len(args) must be > 0')\n    elif len(args) == 1:\n        return obj.get(args[0], default)\n    else:\n        return safe_traverse(obj.get(args[0]), *args[1:], **kwargs)\n\n\ndef safe_traverse_attrs(obj, *args, **kwargs):\n    default = kwargs.get('default', None)\n    if obj is None:\n        return default\n    if len(args) == 0:\n        raise ValueError(u'len(args) must be > 0')\n    elif len(args) == 1:\n        return getattr(obj, args[0], default)\n    else:\n        return safe_traverse_attrs(getattr(obj, args[0]), *args[1:], **kwargs)\n\n\ndef safe_bool(val):\n    if isinstance(val, (str, unicode)):\n        return val.lower() not in ('0', 'false', '\\x00', '')\n    return bool(val)\n\n\ndef safe_uuid(val):\n    if not isinstance(val, basestring):\n        return None\n    u_obj = uuid.UUID(val)\n    return u_obj\n\n\ndef safe_hex_color(val):\n    if not isinstance(val, basestring):\n        return None\n    if val.startswith('#') and len(val) == 7:\n        return val[1:]\n\n\ndef parse_json(json_string):\n    try:\n        result = json.loads(json_string)\n    except ValueError:\n        result = None\n    return result\n\n\n","sub_path":"hitsl_utils/safe.py","file_name":"safe.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"114761765","text":"# 智能信息处理专题第一次作业,汉字语句分词\n# conda activate ngp-wyf\n# pip install pandas -i https://pypi.mirrors.ustc.edu.cn/simple/\n# pip install jieba -i https://pypi.mirrors.ustc.edu.cn/simple/\n# pip install gensim -i https://pypi.mirrors.ustc.edu.cn/simple/\n# pip install bi-lstm-crf -i https://pypi.mirrors.ustc.edu.cn/simple/\n# https://github.com/jidasheng/bi-lstm-crf\n# python -m bi_lstm_crf \"./DATA/corpus_dir\" --model_dir \"./DATA/model_dir\"\n# 把/data/wyf/.conda/envs/ngp-wyf/lib/python3.10/site-packages/torch/nn/utils/rnn.py第260行的lengths改成lengths.cpu()\n# 把/data/wyf/.conda/envs/ngp-wyf/lib/python3.10/site-packages/torch/_tensor.py第757行的self.numpy()改成self.cpu().numpy()\nimport os\nimport re\nimport jieba\nimport jieba.analyse\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom gensim.models import Word2Vec\nfrom bi_lstm_crf.app import WordsTagger\nif False: # 前三题\n    f=open('./DATA/IntelDatahw01.txt','r',encoding='UTF-8')\n    txt=[]\n    for line in f:\n 
txt.append(line.strip())\n f.close()\nif False: # 单词拆分\n ToF = [False,True]\n c_flag = 1\n h_flag = 1\n seg_list = jieba.cut(txt[3],cut_all=ToF[c_flag],HMM=ToF[h_flag])\n print(f'cut\\_all={ToF[c_flag]},HMM={ToF[h_flag]}时的单词拆分结果: ' + '/'.join(list(seg_list)[:500]))\nif False: # 关键词提取\n topK = 30\n seg_list = jieba.analyse.extract_tags(txt[3],topK=topK,withWeight=False)\n print(f'extract\\_tags关键词提取结果: ' + '/'.join(list(seg_list)))\n seg_list = jieba.analyse.textrank(txt[3],topK=topK,withWeight=False)\n print(f'textrank关键词提取结果: ' + '/'.join(list(seg_list)))\nif False: # 训练词向量模型\n seg_list = jieba.cut(txt[3],cut_all=False,HMM=True) # 单词拆分\n txt_list = [list(seg_list)] # 迭代器转二维数组\n model = Word2Vec(txt_list, # 训练词向量\n vector_size=100, # 输出词向量维度\n window=5, # 目标词向前向后看该值个词\n min_count=1, # 忽视出现频次小于该值的词\n negative=3, # 设置多个负采样\n sample=0.001, # 更高频率的词被随机下采样到阈值\n sg=1, # 1采用skipgram,0采用CBOW\n hs=1, # 1采用softmax,0采用负采样\n workers=4) # 训练模型使用线程数量\n print(model.wv.most_similar(positive=['历史','社会'], # 正面相似词\n topn=10, # 最相似的前多少个词\n restrict_vocab=None,\n indexer=None))\n print(model.wv.similarity('历史','社会')) # 两个单词的相似度\nif False: # 文本计算vocab参数\n path = './OUTPUT/IntelDatahw01/train/'\n txt_name = [] # 原始数据文件名列表\n chinese_txt = [] # 全部文件汉字数据\n for home,dirs,files in os.walk(path): # 遍历子文件夹下的全部文件\n for filename in files:\n txt_name.append(os.path.join(home,filename)) # 保存全部文件名\n for i in range(3): # 遍历全部文件len(txt_name)\n with open(txt_name[i],'r',encoding='UTF-8') as f:\n for line in f:\n raw_txt.append(line.strip()) # 读取原始数据\n raw_str = ','.join(raw_txt)#转化为非数组类型 \n # w_list = re.findall(r'[\\u4e00-\\u9fa5]',raw_str)\n w_list = re.sub(\"[a-z\\d+]\",\"\",raw_str)\n w_list = list(set(w_list))\n print(raw_str)\n with open('./DATA/corpus_dir/vocab.json',\"w\",encoding='UTF-8') as f:\n f.write(str(w_list))\nif False: # 文本添加BIS标签\n path = './OUTPUT/IntelDatahw01/train/'\n txt_name = [] # 原始数据文件名列表\n chinese_txt = [] # 全部文件汉字数据\n for home,dirs,files in os.walk(path): # 遍历子文件夹下的全部文件\n for filename in files:\n txt_name.append(os.path.join(home,filename)) # 保存全部文件名\n for i in range(len(txt_name)): # 遍历全部文件len(txt_name)\n raw_txt = [] # 一个文件原始数据\n chinese_line = [] # 一行汉字数据\n label_line = [] # 一行标签数据\n B_flag = True # 判断是否词首\n with open(txt_name[i],'r',encoding='UTF-8') as f:\n for line in f:\n raw_txt.append(line.strip()) # 读取原始数据\n for line in range(len(raw_txt)):\n for i in range(len(raw_txt[line])):\n if '\\u4e00' <= raw_txt[line][i] <= '\\u9fff': # 判断是否汉字\n chinese_line.append(raw_txt[line][i])\n if B_flag:\n label_line.append(\"B\") # 词首\n B_flag = False\n else:\n label_line.append(\"I\") # 词中\n else:\n B_flag = True\n for i in range(0,len(label_line)-1): # 判断是否独词\n if label_line[i] == \"B\" and label_line[i+1] == \"B\":\n label_line[i] = \"S\"\n chinese_str = ''.join(str(i) for i in chinese_line) # 字符数组转字符串\n chinese_txt.append(chinese_str)\n chinese_txt.append(label_line)\n chinese_line = []\n label_line = []\n with open('./DATA/corpus_dir/dataset.txt',\"w\",encoding='UTF-8') as f:\n for i in range(len(chinese_txt)):\n if len(chinese_txt[i]) == 0: # 去掉空行\n continue\n f.write(str(chinese_txt[i]))\n if i%2 == 1:\n f.write(\"\\n\") # 奇数行是BIS标签\n else:\n f.write(\"\\t\") # 偶数行是文本\nif False: # LSTM模型训练\n os.system('python -m bi_lstm_crf \"./DATA/corpus_dir\" --model_dir \"./DATA/model_dir\"') # 执行命令行指令\nif False:\n df = pd.read_csv(\"./DATA/model_dir/loss.csv\")\n df[[\"train_loss\", \"val_loss\"]].ffill().plot(grid=True)\n font = {'family':'serif','style':'italic','weight':'bold','color':'black','size':20} # 
设置标签字体\n plt.title(f'BI-LSTM-CRF model loss',fontdict=font,fontsize=18) # 显示传递函数类型\n plt.xlabel('train_loss',fontdict=font,fontsize=15) # 设置x轴标签\n plt.ylabel('val_loss',fontdict=font,fontsize=15) # 设置y轴标签\n plt.grid(visible=True) # 显示网格线\n plt.tight_layout() # 自适应调整子图大小\n plt.savefig(f'./OUTPUT/LSTM.png') # 保存图像\n plt.close() # 关闭图像\nif True: # LSTM模型预测\n model = WordsTagger(model_dir=\"./DATA/model_dir\")\n tags,sequences = model([\"我要上课要写作业今天上空间机器人课还有人工神经网络课\"])\n print('tags=',tags)\n print('sequences=',sequences)\n","sub_path":"NeuralModel/IntelData01.py","file_name":"IntelData01.py","file_ext":"py","file_size_in_byte":6717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"238493912","text":"class Solution(object):\n def reverseOnlyLetters(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n letter = 'abcdefghijklmnopqrstuvwxyz'\n s = list(S)\n i,j = 0, len(s)-1\n\n while i < j: \n if s[i].lower() not in letter: \n i = i + 1\n elif s[j].lower() not in letter: \n j = j - 1\n else:\n s[i],s[j] = s[j], s[i]\n i = i+1\n j = j-1\n \n return ''.join(s)\n\n\n def reverseOnlyLetters2(self, S):\n \"\"\"\n :type S: str\n :rtype: str\n \"\"\"\n def getNext(S):\n for i in reversed(xrange(len(S))):\n if S[i].isalpha():\n yield S[i]\n\n result = []\n letter = getNext(S)\n for i in xrange(len(S)):\n if S[i].isalpha():\n result.append(letter.next())\n else:\n result.append(S[i])\n return \"\".join(result)\n\n\n\n\nprint(Solution().reverseOnlyLetters(\"ab-cd\"))\nprint(Solution().reverseOnlyLetters2(\"ab-cd\"))","sub_path":"string/0917_reverse_only_letters/0917_reverse_only_letters.py","file_name":"0917_reverse_only_letters.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"582174088","text":"import subprocess\nimport json\n\n# Choose which records gets cleared, \n# 1 for step 1 tests (\"Independent Test Record\") \n# 2 for step 2 tests (\"Testing Searchvalue\")\nstep = 2\nif step == 1:\n member_name = \"Manual Testing Client\"\n orcid_id = \"0000-0002-7361-1027\"\n access_token = \"06702255-c514-4ecc-a225-9c48446a9173\"\nelse:\n member_name = \"Testing Andrej\"\n orcid_id = \"0000-0001-6009-1985\"\n access_token = \"299e0132-623d-4024-9b47-6c9a0e042b39\"\n webhook_token = \"af36161d-0971-4ac6-b860-5bb3f7cdef64\"\n\nputcodes = []\n\ndef main():\n # Load the summary of the relevant record\n record = json.loads(get_record())\n\n # Remove biography items\n delete_bio(record['person'], \"other-names\", \"other-name\", member_name)\n delete_bio(record['person'], \"addresses\", \"address\", member_name)\n delete_bio(record['person'], \"keywords\", \"keyword\", member_name)\n delete_bio(record['person'], \"external-identifiers\", \"external-identifier\", member_name)\n delete_bio(record['person'], \"researcher-urls\", \"researcher-url\", member_name)\n\n # Remove work items\n delete_work(record['activities-summary'], member_name)\n\n # Remove webhook\n if step == 2:\n curl_params = ['-L', '-i', '-k', '-H', 'Authorization: Bearer %s' % webhook_token, '-H', 'Content-Length: 0', '-H','Accept: application/json', '-k', '-X', 'DELETE']\n orcid_curl(\"https://api.qa.orcid.org/\" + orcid_id + \"/webhook/http%3A%2F%2Fnowhere3.com%2Fupdated\", curl_params)\n\ndef orcid_curl(url, curl_opts):\n curl_call = [\"curl\"] + curl_opts + [url]\n try:\n p = subprocess.Popen(curl_call, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n 
print(subprocess.list2cmdline(curl_call))\n        output, err = p.communicate()\n        return output\n    except Exception as e:\n        raise Exception(e)\n\ndef get_record():\n    curl_params = ['-L', '-i', '-k', '-H', 'Authorization: Bearer %s' % access_token, '-H', 'Content-Length: 0', '-H',\n                   'Accept: application/json', '-k', '-X', 'GET']\n    response = orcid_curl(\"https://api.qa.orcid.org/v3.0/%s/%s\" % (orcid_id, \"record\"), curl_params)\n    return response.partition('X-Frame-Options: DENY')[2]\n\n\ndef delete_bio(record, endpoint, endpoint_value, source_name):\n    for x in record[endpoint][endpoint_value]:\n        if x['source']['source-name']['value'] == source_name:\n            if endpoint == \"external-identifiers\" or endpoint == \"researcher-urls\" or endpoint == \"keywords\" or endpoint == \"other-names\":\n                delete(x['put-code'], endpoint)\n            else:\n                delete(x['put-code'], endpoint_value)\n\n\ndef delete_work(record, source_name):\n    for x in record['educations']['affiliation-group']:\n        if x['summaries'][0]['education-summary']['source']['source-name']['value'] == source_name:\n            delete(x['summaries'][0]['education-summary']['put-code'], \"education\")\n\n    for x in record['employments']['affiliation-group']:\n        if x['summaries'][0]['employment-summary']['source']['source-name']['value'] == source_name:\n            delete(x['summaries'][0]['put-code'], \"employment\")\n\n    for x in record['fundings']['group']:\n        if x['funding-summary'][0]['source']['source-name']['value'] == source_name:\n            delete(x['funding-summary'][0]['put-code'], \"funding\")\n\n    for x in record['research-resources']['group']:\n        if x['research-resource-summary'][0]['source']['source-name']['value'] == source_name:\n            delete(x['research-resource-summary'][0]['put-code'], \"research-resource\")\n\n    for x in record['works']['group']:\n        if x['work-summary'][0]['source']['source-name']['value'] == source_name:\n            delete(x['work-summary'][0]['put-code'], \"work\")\n\n    for x in record['qualifications']['affiliation-group']:\n        if x['summaries'][0]['qualification-summary']['source']['source-name']['value'] == source_name:\n            delete(x['summaries'][0]['qualification-summary']['put-code'], \"qualification\")\n\n    for x in record['peer-reviews']['group']:\n        for y in x['peer-review-group']:\n            if y['peer-review-summary'][0]['source']['source-name']['value'] == source_name:\n                delete(y['peer-review-summary'][0]['put-code'], \"peer-review\")\n\n\ndef delete(putcode, endpoint):\n\n    curl_params = ['-L', '-i', '-k', '-H', 'Authorization: Bearer %s' % access_token, '-H', 'Content-Length: 0', '-H',\n                   'Accept: application/json', '-k', '-X', 'DELETE']\n    response = orcid_curl(\"https://api.qa.orcid.org/v3.0/%s/%s/%s\" % (orcid_id, endpoint, putcode), curl_params)\n    print(response)\n    print (\"****************** %s deleted ******************\" % endpoint)\n    print (\"\")\n\nmain()\n","sub_path":"orcid/delete_record_contents.py","file_name":"delete_record_contents.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"535001868","text":"#\n# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. 
Licensed under the new BSD license.\n#\nimport logging\nimport re\nimport platform\nimport sys\n\n\nif platform.system() == 'Windows':\n    import ctypes\n    import ctypes.wintypes\n\n    # Reference: https://gist.github.com/vsajip/758430\n    #            https://github.com/ipython/ipython/issues/4252\n    #            https://msdn.microsoft.com/en-us/library/windows/desktop/ms686047%28v=vs.85%29.aspx\n    ctypes.windll.kernel32.SetConsoleTextAttribute.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD]\n    ctypes.windll.kernel32.SetConsoleTextAttribute.restype = ctypes.wintypes.BOOL\n\n\nclass ColorizingStreamHandler(logging.StreamHandler):\n    # color names to indices\n    color_map = {\n        'black': 0,\n        'red': 1,\n        'green': 2,\n        'yellow': 3,\n        'blue': 4,\n        'magenta': 5,\n        'cyan': 6,\n        'white': 7,\n    }\n\n    # levels to (background, foreground, bold/intense)\n    level_map = {\n        logging.DEBUG: (None, 'blue', False),\n        logging.INFO: (None, 'white', False),\n        logging.WARNING: (None, 'yellow', False),\n        logging.ERROR: (None, 'red', False),\n        logging.CRITICAL: ('red', 'white', False)\n    }\n    csi = '\\x1b['\n    reset = '\\x1b[0m'\n    disable_coloring = False\n\n    @property\n    def is_tty(self):\n        isatty = getattr(self.stream, 'isatty', None)\n        return isatty and isatty() and not self.disable_coloring\n\n    def emit(self, record):\n        try:\n            message = self.format(record)\n            stream = self.stream\n\n            if not self.is_tty:\n                if message and message[0] == \"\\r\":\n                    message = message[1:]\n                stream.write(message)\n            else:\n                self.output_colorized(message)\n            stream.write(getattr(self, 'terminator', '\\n'))\n\n            self.flush()\n        except (KeyboardInterrupt, SystemExit):\n            raise\n        except IOError:\n            pass\n        except:\n            self.handleError(record)\n\n\n    if not platform.system() == 'Windows':\n        def output_colorized(self, message):\n            self.stream.write(message)\n    else:\n        ansi_esc = re.compile(r'\\x1b\\[((?:\\d+)(?:;(?:\\d+))*)m')\n\n        nt_color_map = {\n            0: 0x00,    # black\n            1: 0x04,    # red\n            2: 0x02,    # green\n            3: 0x06,    # yellow\n            4: 0x01,    # blue\n            5: 0x05,    # magenta\n            6: 0x03,    # cyan\n            7: 0x07,    # white\n        }\n\n        def output_colorized(self, message):\n            parts = self.ansi_esc.split(message)\n            write = self.stream.write\n            h = None\n            fd = getattr(self.stream, 'fileno', None)\n\n            if fd is not None:\n                fd = fd()\n\n            if fd in (1, 2):  # stdout or stderr\n                h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)\n\n            while parts:\n                text = parts.pop(0)\n\n                if text:\n                    if sys.version_info < (3, 0, 0):\n                        write(text.encode('utf-8'))\n                    else:\n                        write(text)\n\n                if parts:\n                    params = parts.pop(0)\n\n                    if h is not None:\n                        params = [int(p) for p in params.split(';')]\n                        color = 0\n\n                        for p in params:\n                            if 40 <= p <= 47:\n                                color |= self.nt_color_map[p - 40] << 4\n                            elif 30 <= p <= 37:\n                                color |= self.nt_color_map[p - 30]\n                            elif p == 1:\n                                color |= 0x08  # foreground intensity on\n                            elif p == 0:  # reset to default color\n                                color = 0x07\n                            else:\n                                pass  # error condition ignored\n\n                        ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)\n\n    def colorize(self, message, record):\n        if record.levelno in self.level_map and self.is_tty:\n            bg, fg, bold = self.level_map[record.levelno]\n            params = []\n\n            if bg in self.color_map:\n                params.append(str(self.color_map[bg] + 40))\n\n            if fg in self.color_map:\n                params.append(str(self.color_map[fg] + 30))\n\n            if bold:\n                params.append('1')\n\n            if params and message:\n                if message.lstrip() != message:\n                    prefix = re.search(r\"\\s+\", message).group(0)\n                    message = message[len(prefix):]\n                else:\n                    prefix = \"\"\n\n                message = \"%s%s\" % (prefix, ''.join((self.csi, ';'.join(params),\n                                                     'm', message, 
self.reset)))\n\n        return message\n\n    def format(self, record):\n        message = logging.StreamHandler.format(self, record)\n        return self.colorize(message, record)\n\n\nlogging.addLevelName(16, \"SUCCESS\")\nlogger = logging.getLogger('nhentai')\nLOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)\nFORMATTER = logging.Formatter(\"\\r[%(asctime)s] %(funcName)s: %(message)s\", \"%H:%M:%S\")\nLOGGER_HANDLER.setFormatter(FORMATTER)\nLOGGER_HANDLER.level_map[logging.getLevelName(\"SUCCESS\")] = (None, \"green\", False)\nlogger.addHandler(LOGGER_HANDLER)\nlogger.setLevel(logging.DEBUG)\n\n\nif __name__ == '__main__':\n    logger.log(16, 'nhentai')\n    logger.info('info')\n    logger.warning('warning')\n    logger.debug('debug')\n    logger.error('error')\n    logger.critical('critical')\n","sub_path":"nhentai/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"387335465","text":"import os\n\nimport datetime as dt\nimport numpy as np\nimport pandas as pd\nimport json \n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify, render_template\n# from flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\n#################################################\n# Database Setup\n#################################################\n\nengine = create_engine(\"sqlite:///db/fifa_players.sqlite\")\ncon = engine.connect()\ncon.execute(\"SELECT * FROM player_table\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save references to each table\nplayers = Base.classes.player_table\n\n# print(players)\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef index():\n    \"\"\"Return the homepage.\"\"\"\n    return (\n        f\"Available Routes:
\"\n f\"/players\"\n )\n\n@app.route(\"/players\")\ndef stats():\n \"\"\"Return a list player data\"\"\"\n # Query all players\n sel = [\n players.sofifa_id,\n players.short_name,\n players.age,\n players.nationality,\n players.overall,\n players.club,\n players.value_eur,\n players.wage_eur,\n players.preferred_foot,\n players.team_position,\n players.bmi,\n players.height_in,\n players.weight_lbs\n ]\n results = session.query(*sel).all()\n\n session.close()\n\n # Create a dictionary from the row data and append to a list of all_passengers\n all_players = []\n # import base64\n for sofifa_id, short_name, age, nationality, overall, club, value_eur, wage_eur, preferred_foot, team_position, bmi, height_in, weight_lbs in results:\n # encoded = base64.b64encode(b'{}'.format(short_name)) \n player_dict = {}\n player_dict[\"sofifa_id\"] = sofifa_id\n player_dict[\"short_name\"] = short_name\n # player_dict[\"short_name\"] = encoded.decode('ascii') \n player_dict[\"age\"] = age\n player_dict[\"nationality\"] = nationality\n player_dict[\"overall\"] = overall\n player_dict[\"club\"] = club\n player_dict[\"value_eur\"] = value_eur\n player_dict[\"wage_eur\"] = wage_eur\n player_dict[\"preferred_foot\"] = preferred_foot\n player_dict[\"team_position\"] = team_position\n player_dict[\"bmi\"] = bmi\n player_dict[\"height_in\"] = height_in\n player_dict[\"weight_lbs\"] = weight_lbs\n all_players.append(player_dict)\n\n print(all_players)\n with open(\"top_players.json\", \"w\") as outfile: \n json.dump(all_players, outfile)\n\n\n return jsonify(all_players)\n\n\nif __name__ == '__main__':\n\n app.run(debug=True)\n ","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376156945","text":"#\n# @lc app=leetcode.cn id=647 lang=python3\n#\n# [647] 回文子串\n#\n# https://leetcode-cn.com/problems/palindromic-substrings/description/\n#\n# algorithms\n# Medium (62.39%)\n# Likes: 315\n# Dislikes: 0\n# Total Accepted: 39.4K\n# Total Submissions: 62.4K\n# Testcase Example: '\"abc\"'\n#\n# 给定一个字符串,你的任务是计算这个字符串中有多少个回文子串。\n# \n# 具有不同开始位置或结束位置的子串,即使是由相同的字符组成,也会被视作不同的子串。\n# \n# \n# \n# 示例 1:\n# \n# 输入:\"abc\"\n# 输出:3\n# 解释:三个回文子串: \"a\", \"b\", \"c\"\n# \n# \n# 示例 2:\n# \n# 输入:\"aaa\"\n# 输出:6\n# 解释:6个回文子串: \"a\", \"a\", \"a\", \"aa\", \"aa\", \"aaa\"\n# \n# \n# \n# 提示:\n# \n# \n# 输入的字符串长度不会超过 1000 。\n# \n# \n#\n\n\n# @lc code=start\nclass Solution:\n def countSubstrings(self, s: str) -> int:\n r = 0\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n for i in range(n):\n for j in range(i + 1):\n if s[i] == s[j] and (i - j < 2 or dp[j + 1][i - 1]):\n dp[j][i] = 1\n r += 1\n return r\n\n# @lc code=end\n","sub_path":"medium/647.回文子串.py","file_name":"647.回文子串.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308416239","text":"import logging\nlogger = logging.getLogger(__name__)\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import Http404, get_object_or_404, render_to_response\nfrom django.template import RequestContext\n\nfrom ui.models import Repository, Organization, Collection, Entity, File\n\n\n# views ----------------------------------------------------------------\n\ndef detail( request, repo, 
org, cid, eid ):\n entity = Entity.get(repo, org, cid, eid)\n if not entity:\n raise Http404\n return render_to_response(\n 'ui/entities/detail.html',\n {\n 'repo': repo,\n 'org': org,\n 'cid': cid,\n 'eid': eid,\n 'object': entity,\n },\n context_instance=RequestContext(request, processors=[])\n )\n\ndef files( request, repo, org, cid, eid ):\n \"\"\"Lists all the files in an entity.\n \"\"\"\n entity = Entity.get(repo, org, cid, eid)\n if not entity:\n raise Http404\n paginator = Paginator(entity.files(), settings.RESULTS_PER_PAGE)\n page = paginator.page(request.GET.get('page', 1))\n return render_to_response(\n 'ui/entities/files.html',\n {\n 'repo': repo,\n 'org': org,\n 'cid': cid,\n 'eid': eid,\n 'object': entity,\n 'paginator': paginator,\n 'page': page,\n },\n context_instance=RequestContext(request, processors=[])\n )\n","sub_path":"ddrpublic/ui/views/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530654679","text":"import random\nimport faker\nimport datetime\n\nfrom labgrownsheets.model import StarSchemaModel\n\nnum_iterations = 1000\nscale_factor = 4\nfolder = 'sample-data'\n\nfake = faker.Faker()\nlow_date = datetime.datetime(2018, 11, 1)\nhigh_date = datetime.datetime(2018, 12, 1)\nnum_days = (high_date - low_date).days\nnum_currencies = 5\n\n\"\"\"\nUsage: python generate_dummy_date.py\n\nGenerates a some sample star schema entities for a given number of rows in a specified file. The size of the data \nwill be proportional to the number of iterations and the specified scale_factor.\n\"\"\"\n\n\ndef generate_customer():\n gender = random.sample([\"male\", \"female\"], 1)[0]\n if gender == \"male\":\n name = fake.name_male()\n else:\n name = fake.name_female()\n address = fake.address()\n\n first, last = name.split(' ')[-2:]\n return {\"first_name\": first,\n \"last_name\": last,\n \"gender\": gender,\n \"address\": address.replace('\\n', ', ')}\n\n\ndef generate_product():\n name = fake.bs().split(' ')[-1]\n desc = fake.paragraph()\n return {\n \"name\": name,\n \"long_desc\": desc\n }\n\n\ndef generate_order():\n return {\n 'order_time': fake.date_time_between(low_date, high_date)\n }\n\n\ndef generate_order_item():\n return {\n \"amount\": random.weibullvariate(1, 0.5) * 100\n }\n\n\ndef generate_currency():\n curs = [\"AUD\"]\n yield {\n \"currency\": \"AUD\"\n }\n while True:\n new_cur = fake.currency()[0]\n if new_cur not in curs:\n curs.append(new_cur)\n yield {\n \"currency\": new_cur\n }\n\n\ndef generate_currency_conv():\n def daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + datetime.timedelta(n)\n\n while True:\n root_value = random.weibullvariate(1, 3)\n\n for cur_day in daterange(low_date, high_date):\n yield {\n \"day_value\": cur_day,\n \"to_aud\": root_value\n }\n root_value += random.gauss(0, root_value / 100)\n\n\ndef get_num_products(num_iterations, scale_factor):\n return random.randint(\n min(random.randint(50, 100), int(num_iterations / scale_factor - num_iterations / scale_factor / 2)),\n min(random.randint(150, 200), int(num_iterations / scale_factor + num_iterations / scale_factor / 2))\n )\n\n\ndef main():\n num_products = get_num_products(num_iterations, scale_factor)\n\n schema = [\n # DIMS\n ('naive', {\n 'name': 'customer', # the name of the entity/table\n 'entity_generator': generate_customer, # function that defines entity\n 'num_iterations': 
num_iterations # How many times to run that function\n }),\n ('naive', {\n 'name': 'product',\n 'entity_generator': generate_product,\n 'num_iterations': num_products\n }),\n ('naive', {\n 'name': 'currency',\n 'entity_generator': generate_currency,\n 'num_iterations': num_currencies\n }),\n # FACTS\n ('naive', {\n 'name': 'orders',\n 'entity_generator': generate_order,\n 'num_iterations': num_iterations * scale_factor,\n 'relations': [{'name': 'customer'},\n {'name': 'currency'}],\n 'schema': [{'name': 'currency', 'parent_entity': 'currency'}]\n }),\n ('naive', {\n 'name': 'order_item',\n 'entity_generator': generate_order_item,\n 'num_iterations': num_iterations * scale_factor,\n 'num_entities_per_iteration': lambda: random.randint(1, 3), # Number of facts per iteration (e.g. 3 items 1 order)\n 'relations': [{'name': 'orders', 'unique': True},\n {'name': 'product', 'type': 'many_to_many', 'unique': True}]\n # Each iteration has the same entity link for one_to_many relations (e.g. one order_id per order_item)\n # For many_to_many this link is sampled - if unique_per_fact then it is sampled without replacement.\n # In this example an order has multiple order items, each linked to a unique_per_fact product within that order\n # If an order could have multiple of the same product then unique_per_fact would be false\n }),\n ('naive', {\n 'name': 'currency_conversion',\n 'entity_generator': generate_currency_conv,\n 'num_iterations': num_currencies,\n 'num_entities_per_iteration': num_days, # We get one record per currency per day\n 'relations': [{'name': 'currency', 'unique': True}],\n # Here the default type is one_to_many - in this case there will be a unique value for each iteration\n # Sampled from the source table - note this will fail if there are more iterations that values in\n # The original table.\n 'schema': [{'name': 'currency', 'parent_entity': 'currency'}]\n })\n ]\n\n dummy_data = StarSchemaModel.from_list(schema)\n dummy_data.generate_all_datasets(print_progress=True)\n dummy_data.to_csv(folder)\n dummy_data.to_pickled_pyschema(folder)\n print(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"examples/denormalised_example.py","file_name":"denormalised_example.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"650893152","text":"def overlap(inF1, inF2, ouF):\n ouFile = open(ouF, 'w')\n Up = {}\n Down = {}\n\n inFile = open(inF1)\n head1 = inFile.readline().strip()\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n if float(fields[1]) > 0:\n Up[fields[0]] = line\n elif float(fields[1]) < 0:\n Down[fields[0]] = line\n\n inFile.close()\n\n inFile = open(inF2)\n head2 = inFile.readline().strip()\n ouFile.write(head2 + '\\t' + head1 + '\\n')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n fc = float(fields[2])\n gene = fields[7]\n if fc > 0:\n if gene in Up:\n ouFile.write(line + '\\t' + Up[gene] + '\\n')\n if fc < 0:\n if gene in Down:\n ouFile.write(line + '\\t' + Down[gene] + '\\n')\n inFile.close()\n ouFile.close()\n\noverlap('K562-Proteomics-log2-Diff-SigFC.txt', 'K562_Exp1Exp2_sig_proteincoding.txt', 'K562_Exp1Exp2_sig_proteincoding_K562-Proteomics-log2-Diff-SigFC')\noverlap('K562-Proteomics-log2-Diff-Sig.txt', 'K562_Exp1Exp2_sig_proteincoding.txt', 
'K562_Exp1Exp2_sig_proteincoding_K562-Proteomics-log2-Diff-Sig')\n","sub_path":"K562/10-RNASeqProteomics/01-overlap.py","file_name":"01-overlap.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"426453688","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport json, psycopg2, paramiko\n\n###\nwarn_bs = {}\n###\n\ndef load_params():\n    json_file = './pd_params.json'\n    with open(json_file) as read_file:\n        pd_params = json.load(read_file)\n    return pd_params\n\ndef con_db():\n    pd_params = load_params()\n    try:\n        db = psycopg2.connect(**pd_params['db'])\n    except psycopg2.Error :#as e:\n        #print(e)\n        return False\n    else:\n        return db\n\ndef sql_act(sql,n=1):\n    global warn_bs\n    db = con_db()\n    if db:\n        cur = db.cursor()\n        try:\n            if n == 0:\n                cur.execute(sql)\n                db.commit()\n            else:\n                cur.execute(sql)\n                rows = cur.fetchall()\n                return rows\n        except psycopg2.Error as e:\n            #print \"\\nWARNING: Wrong with operating the db, %s \" % str(e).strip()\n            warn_bs['db'] = \"WARNING: Wrong with operating the db, \" + str(e).strip()\n            return False\n        finally:\n            cur.close()\n            db.close()\n    else:\n        #print \"\\nWARNING: Connection to the db is Error.\"\n        warn_bs['db'] = \"WARNING: Connection to the db is Error.\"\n\ndef pg_act(table,action,args=[]):\n    if args:\n        if action == 'delete':\n            cond_keys = args[0].keys()\n            conds = []\n            for key in cond_keys:\n                conds.append(\"='\".join([key,args[0][key]]))\n            cond = \"' AND \".join(conds)\n            sql = \"DELETE FROM \" + table + \" WHERE \" + cond + \"'\"\n            sql_act(sql, 0)\n        if action == \"insert\":\n            #args = [{'obj_id':'1',}]\n            rows = args[0].keys()\n            vals = []\n            for row in rows:\n                vals.append(args[0][row])\n            sql = \"INSERT INTO \" + table + \" (\" + \", \".join(rows) + \") VALUES ('\" + \"', '\".join(vals) +\"')\"\n            sql_act(sql, 0)\n        if action == \"update\":\n            #args = [{'obj_name':'x','obj_comp_time':'2019-01-01 00:00:00'},{'obj_id':'1','obs_stag':'sent'}]\n            rows = args[0].keys()\n            targs = []\n            for row in rows:\n                targs.append(\"='\".join([row,args[0][row]]))\n            targ = \"' , \".join(targs)\n            cond_keys = args[1].keys()\n            conds = []\n            for key in cond_keys:\n                conds.append(\"='\".join([key,args[1][key]]))\n            cond = \"' AND \".join(conds)\n            sql = \"UPDATE \" + table + \" SET \" + targ + \"' WHERE \" + cond + \"'\"\n            sql_act(sql, 0)\n        if action == \"select\":\n            #args = [['1','2'],{'obj_name':'x','obj_comp_time':'2019-01-01 00:00:00'}]\n            rows = ','.join(args[0])\n            cond_keys = args[1].keys()\n            conds = []\n            if len(args) > 2:\n                cond_more = args[2]\n            else:\n                cond_more = ''\n            for key in cond_keys:\n                conds.append(\"='\".join([key,args[1][key]]))\n            cond = \"' AND \".join(conds)\n            sql = \"SELECT \" + rows + \" FROM \" + table + \" WHERE \" + cond + \"' \" + cond_more \n            res = sql_act(sql)\n            return res\n\ndef con_ssh(ip, username, passwd, cmd):\n    global warn_bs\n    ssh = paramiko.SSHClient()\n    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n    try:\n        ssh.connect(hostname=ip, port=22, username=username, password=passwd, timeout=60)\n    except:\n        #print \"\\nWARNING: Connection of ssh is wrong!\"\n        warn_bs['ssh'] = 'WARNING: Connection to %s by ssh is wrong!' 
% ip\n else:\n stdin, stdout, stderr = ssh.exec_command(cmd,get_pty=True)\n out = stdout.readlines()\n ssh.close()\n return out","sub_path":"plan_dispatch/pd_by_paramiko/pd_v2/pd_tools.py","file_name":"pd_tools.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372634768","text":"MODE_ARM = 0\nMODE_THUMB = 1\nMODE_JAZELLE = 2\n\n#IFLAGS - keep bottom 8-bits for cross-platform flags like envi.IF_NOFALL and envi.IF_BRFALL\nIF_PSR_S = 1<<32 # This DP instruciton can update CPSR\nIF_B = 1<<33 # Byte\nIF_H = 1<<35 # HalfWord\nIF_S = 1<<36 # Signed\nIF_D = 1<<37 # Dword\nIF_L = 1<<38 # Long-store (eg. Dblword Precision) for STC\nIF_T = 1<<39 # Translate for strCCbt\nIF_W = 1<<40 # Write Back for STM/LDM (!)\nIF_UM = 1<<41 # User Mode Registers for STM/LDM (^) (obviously no R15)\n\nIF_DAIB_SHFT = 56 # shift-bits to get DAIB bits down to 0. this chops off the \"is DAIB present\" bit that the following store.\nIF_DAIB_MASK = 7<<(IF_DAIB_SHFT-1)\nIF_DA = 1<<(IF_DAIB_SHFT-1) # Decrement After\nIF_IA = 3<<(IF_DAIB_SHFT-1) # Increment After\nIF_DB = 5<<(IF_DAIB_SHFT-1) # Decrement Before\nIF_IB = 7<<(IF_DAIB_SHFT-1) # Increment Before\nIF_DAIB_B = 5<<(IF_DAIB_SHFT-1) # Before mask \nIF_DAIB_I = 3<<(IF_DAIB_SHFT-1) # Before mask \nIF_THUMB32 = 1<<50 # thumb32\nIF_VQ = 1<<51 # Adv SIMD: operation uses saturating arithmetic\nIF_VR = 1<<52 # Adv SIMD: operation performs rounding\nIF_VD = 1<<53 # Adv SIMD: operation doubles the result\nIF_VH = 1<<54 # Adv SIMD: operation halves the result\nIF_SYS_MODE = 1<<58 # instruction is encoded to be executed in SYSTEM mode, not USER mode\n\nOF_W = 1<<8 # Write back to \nOF_UM = 1<<9 # Usermode, or if r15 included set current SPSR -> CPSR\n\n\nOSZFMT_BYTE = \"B\"\nOSZFMT_HWORD = \"\\d+)/$', NewsDetailView.as_view()),\n url(r'^(?P\\d+)/comment/', comment, name='add_comment'),\n )\n","sub_path":"news/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170735605","text":"\"\"\"Support for the GIOS service.\"\"\"\nfrom homeassistant.components.air_quality import (\n ATTR_CO,\n ATTR_NO2,\n ATTR_OZONE,\n ATTR_PM_2_5,\n ATTR_PM_10,\n ATTR_SO2,\n AirQualityEntity,\n)\nfrom homeassistant.const import CONF_NAME\n\nfrom .const import ATTR_STATION, DATA_CLIENT, DEFAULT_SCAN_INTERVAL, DOMAIN, ICONS_MAP\n\nATTRIBUTION = \"Data provided by GIOŚ\"\nSCAN_INTERVAL = DEFAULT_SCAN_INTERVAL\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Add a GIOS entities from a config_entry.\"\"\"\n name = config_entry.data[CONF_NAME]\n\n data = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]\n\n async_add_entities([GiosAirQuality(data, name)], True)\n\n\ndef round_state(func):\n \"\"\"Round state.\"\"\"\n\n def _decorator(self):\n res = func(self)\n if isinstance(res, float):\n return round(res)\n return res\n\n return _decorator\n\n\nclass GiosAirQuality(AirQualityEntity):\n \"\"\"Define an GIOS sensor.\"\"\"\n\n def __init__(self, gios, name):\n \"\"\"Initialize.\"\"\"\n self.gios = gios\n self._name = name\n self._aqi = None\n self._co = None\n self._no2 = None\n self._o3 = None\n self._pm_2_5 = None\n self._pm_10 = None\n self._so2 = None\n self._attrs = {}\n\n @property\n def name(self):\n \"\"\"Return the name.\"\"\"\n return self._name\n\n @property\n def icon(self):\n \"\"\"Return the icon.\"\"\"\n if self._aqi in ICONS_MAP:\n return 
ICONS_MAP[self._aqi]\n return \"mdi:blur\"\n\n @property\n def air_quality_index(self):\n \"\"\"Return the air quality index.\"\"\"\n return self._aqi\n\n @property\n @round_state\n def particulate_matter_2_5(self):\n \"\"\"Return the particulate matter 2.5 level.\"\"\"\n return self._pm_2_5\n\n @property\n @round_state\n def particulate_matter_10(self):\n \"\"\"Return the particulate matter 10 level.\"\"\"\n return self._pm_10\n\n @property\n @round_state\n def ozone(self):\n \"\"\"Return the O3 (ozone) level.\"\"\"\n return self._o3\n\n @property\n @round_state\n def carbon_monoxide(self):\n \"\"\"Return the CO (carbon monoxide) level.\"\"\"\n return self._co\n\n @property\n @round_state\n def sulphur_dioxide(self):\n \"\"\"Return the SO2 (sulphur dioxide) level.\"\"\"\n return self._so2\n\n @property\n @round_state\n def nitrogen_dioxide(self):\n \"\"\"Return the NO2 (nitrogen dioxide) level.\"\"\"\n return self._no2\n\n @property\n def attribution(self):\n \"\"\"Return the attribution.\"\"\"\n return ATTRIBUTION\n\n @property\n def unique_id(self):\n \"\"\"Return a unique_id for this entity.\"\"\"\n return self.gios.station_id\n\n @property\n def available(self):\n \"\"\"Return True if entity is available.\"\"\"\n return self.gios.available\n\n @property\n def device_state_attributes(self):\n \"\"\"Return the state attributes.\"\"\"\n self._attrs[ATTR_STATION] = self.gios.station_name\n return self._attrs\n\n async def async_update(self):\n \"\"\"Get the data from GIOS.\"\"\"\n await self.gios.async_update()\n\n if self.gios.available:\n # Different measuring stations have different sets of sensors. We don't know\n # what data we will get.\n if \"AQI\" in self.gios.sensors:\n self._aqi = self.gios.sensors[\"AQI\"][\"value\"]\n if \"CO\" in self.gios.sensors:\n self._co = self.gios.sensors[\"CO\"][\"value\"]\n self._attrs[f\"{ATTR_CO}_index\"] = self.gios.sensors[\"CO\"][\"index\"]\n if \"NO2\" in self.gios.sensors:\n self._no2 = self.gios.sensors[\"NO2\"][\"value\"]\n self._attrs[f\"{ATTR_NO2}_index\"] = self.gios.sensors[\"NO2\"][\"index\"]\n if \"O3\" in self.gios.sensors:\n self._o3 = self.gios.sensors[\"O3\"][\"value\"]\n self._attrs[f\"{ATTR_OZONE}_index\"] = self.gios.sensors[\"O3\"][\"index\"]\n if \"PM2.5\" in self.gios.sensors:\n self._pm_2_5 = self.gios.sensors[\"PM2.5\"][\"value\"]\n self._attrs[f\"{ATTR_PM_2_5}_index\"] = self.gios.sensors[\"PM2.5\"][\n \"index\"\n ]\n if \"PM10\" in self.gios.sensors:\n self._pm_10 = self.gios.sensors[\"PM10\"][\"value\"]\n self._attrs[f\"{ATTR_PM_10}_index\"] = self.gios.sensors[\"PM10\"][\"index\"]\n if \"SO2\" in self.gios.sensors:\n self._so2 = self.gios.sensors[\"SO2\"][\"value\"]\n self._attrs[f\"{ATTR_SO2}_index\"] = self.gios.sensors[\"SO2\"][\"index\"]\n","sub_path":"homeassistant/components/gios/air_quality.py","file_name":"air_quality.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411136837","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom keras.models import Model\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.convolutional import Convolution1D\nfrom keras.layers import *\nfrom keras.layers.embeddings import Embedding\nfrom keras.optimizers import Adam\nfrom keras.objectives import categorical_crossentropy\nfrom keras.optimizers import Adam, RMSprop\n\ndef build_model(token_len, token_char_vector_dict,\n nb_encoding_layers, nb_dense_dims,\n lemma_len, lemma_char_vector_dict,\n nb_tags, 
nb_morph_cats,\n nb_lemmas, nb_train_tokens,\n nb_context_tokens,\n nb_embedding_dims,\n pretrained_embeddings=None,\n include_token=True,\n include_context=True,\n include_lemma=True,\n include_pos=True,\n include_morph=True,\n nb_filters = 100,\n filter_length = 3,\n focus_repr = 'recurrent',\n dropout_level = .15,\n ):\n \n inputs, outputs = [], []\n subnets = []\n \n if include_token:\n # add input layer:\n token_input = Input(shape=(token_len, len(token_char_vector_dict)),\n name='focus_in')\n inputs.append(token_input)\n\n if focus_repr == 'recurrent':\n # add recurrent layers to model focus token:\n for i in range(nb_encoding_layers):\n if i == 0:\n curr_input = token_input\n else:\n curr_input = curr_enc_out\n\n if i == (nb_encoding_layers - 1):\n token_subnet = Bidirectional(LSTM(output_dim=nb_dense_dims,\n return_sequences=False,\n activation='tanh',\n name='final_focus_encoder'),\n merge_mode='sum')(curr_input)\n else:\n curr_enc_out = Bidirectional(LSTM(output_dim=nb_dense_dims,\n return_sequences=True,\n activation='tanh',\n name='encoder_'+str(i+1)),\n merge_mode='sum')(curr_input)\n elif focus_repr == 'convolutions':\n token_subnet = Convolution1D(input_shape=(token_len, len(token_char_vector_dict)),\n nb_filter=nb_filters,\n filter_length=filter_length,\n activation='relu',\n border_mode='valid',\n subsample_length=1,\n init='glorot_uniform',\n name='focus_conv')(token_input)\n token_subnet = Flatten(name='focus_flat')(token_subnet)\n token_subnet = Dropout(dropout_level, name='focus_dropout1')(token_subnet)\n token_subnet = Dense(nb_dense_dims, name='focus_dense')(token_subnet)\n token_subnet = Dropout(dropout_level, name='focus_dropout2')(token_subnet)\n token_subnet = Activation('relu', name='final_focus_encoder')(token_subnet)\n\n else:\n raise ValueError('Parameter `focus_repr` not understood: use \"recurrent\" or \"convolutions\".')\n\n subnets.append(token_subnet)\n\n if include_context:\n context_input = Input(shape=(nb_context_tokens,), dtype='int32', name='context_in')\n inputs.append(context_input)\n\n context_subnet = Embedding(input_dim=nb_train_tokens,\n output_dim=nb_embedding_dims,\n weights=pretrained_embeddings,\n input_length=nb_context_tokens,\n name='context_embedding')(context_input)\n context_subnet = Flatten(name='context_flatten')(context_subnet)\n context_subnet = Dropout(dropout_level, name='context_dropout')(context_subnet)\n context_subnet = Activation('relu', name='context_relu')(context_subnet)\n context_subnet = Dense(nb_dense_dims, name='context_dense1')(context_subnet)\n context_subnet = Dropout(dropout_level, name='context_dropout2')(context_subnet)\n context_subnet = Activation('relu', name='context_out')(context_subnet)\n\n subnets.append(context_subnet)\n\n # combine subnets:\n if len(subnets) > 1:\n joined = merge(subnets, mode='concat', name='joined')\n else:\n joined = Activation('linear', name='joined')(subnets[0])\n\n if include_lemma:\n if include_lemma == 'generate':\n repeat = RepeatVector(lemma_len, name='encoder_repeat')(joined)\n\n for i in range(nb_encoding_layers):\n if i == 0:\n curr_input = repeat\n else:\n curr_input = curr_out\n\n if i == (nb_encoding_layers - 1):\n output_name = 'final_focus_decoder'\n else:\n output_name = 'decoder_'+str(i + 1)\n\n curr_out = Bidirectional(LSTM(output_dim=nb_dense_dims,\n return_sequences=True,\n activation='tanh',\n name=output_name),\n merge_mode='sum')(curr_input)\n # add lemma decoder\n lemma_label = TimeDistributed(Dense(len(lemma_char_vector_dict)),\n 
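[editor's note] The model-building record above targets the Keras 1.x functional API (output_dim=, merge(...), Model(input=..., output=...)). For reference only, a rough tf.keras 2.x sketch of the bidirectional character-encoder branch; the dimensions are placeholders, not the record's defaults, and this is not a drop-in replacement for the full model:

import tensorflow as tf

token_len, n_chars, n_dense = 20, 60, 128   # placeholder sizes
inp = tf.keras.Input(shape=(token_len, n_chars), name="focus_in")
x = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(n_dense, return_sequences=True, activation="tanh"),
    merge_mode="sum")(inp)
out = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(n_dense, return_sequences=False, activation="tanh"),
    merge_mode="sum", name="final_focus_encoder")(x)
encoder = tf.keras.Model(inp, out)
encoder.summary()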
name='lemma_dense')(curr_out)\n lemma_label = Activation('softmax', name='lemma_out')(lemma_label)\n\n elif include_lemma == 'label':\n lemma_label = Dense(nb_lemmas,\n name='lemma_dense1')(joined)\n lemma_label = Dropout(dropout_level,\n name='lemma_dense_dropout1')(lemma_label)\n lemma_label = Activation('softmax',\n name='lemma_out')(lemma_label)\n\n outputs.append(lemma_label)\n\n if include_pos:\n pos_label = Dense(nb_tags,\n name='pos_dense1')(joined)\n pos_label = Dropout(dropout_level,\n name='pos_dense_dropout1')(pos_label)\n pos_label = Activation('softmax',\n name='pos_out')(pos_label)\n outputs.append(pos_label)\n\n if include_morph:\n if include_morph == 'label':\n morph_label = Dense(nb_dense_dims,\n activation='relu',\n name='morph_dense1')(joined)\n morph_label = Dropout(dropout_level,\n name='morph_dense_dropout1')(morph_label)\n morph_label = Dense(nb_dense_dims,\n activation='relu',\n name='morph_dense2')(morph_label)\n morph_label = Dropout(dropout_level,\n name='morph_dense_dropout2')(morph_label)\n morph_label = Dense(nb_morph_cats,\n activation='relu',\n name='morph_dense3')(morph_label)\n morph_label = Dropout(dropout_level,\n name='morph_dense_dropout3')(morph_label)\n morph_label = Activation('softmax',\n name='morph_out')(morph_label)\n\n elif include_morph == 'multilabel':\n morph_label = Dense(nb_dense_dims,\n activation='relu',\n name='morph_dense1')(joined)\n morph_label = Dropout(dropout_level,\n name='morph_dense_dropout1')(morph_label)\n morph_label = Dense(nb_dense_dims,\n activation='relu',\n name='morph_dense2')(morph_label)\n morph_label = Dropout(dropout_level,\n name='morph_dense_dropout2')(morph_label)\n morph_label = Dense(nb_morph_cats,\n activation='relu',\n name='morph_dense3')(morph_label)\n morph_label = Dropout(dropout_level,\n name='morph_dense_dropout3')(morph_label)\n morph_label = Activation('tanh',\n name='morph_out')(morph_label)\n\n outputs.append(morph_label)\n\n loss_dict = {}\n if include_lemma:\n loss_dict['lemma_out'] = 'categorical_crossentropy'\n if include_pos:\n loss_dict['pos_out'] = 'categorical_crossentropy'\n if include_morph:\n if include_morph == 'label':\n loss_dict['morph_out'] = 'categorical_crossentropy'\n elif include_morph == 'multilabel':\n loss_dict['morph_out'] = 'binary_crossentropy'\n \n model = Model(input=inputs, output=outputs)\n if focus_repr == 'convolutions':\n model.compile(optimizer='SGD', loss=loss_dict)\n else:\n model.compile(optimizer='RMSprop', loss=loss_dict)\n \n return model","sub_path":"pandora/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"392527132","text":"# 模仿网上的版本使用python3进行重构\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nproxy = open('proxy.txt','w')\nproxy_list = open('proxy_list.txt','w')\n\n\nfor page in range(1,2):\n url = 'http://www.xicidaili.com/nn/%s'%page\n headers = {\n \"Host\": \"www.xicidaili.com\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:47.0) Gecko/20100101 Firefox/47.0\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-US,en;q=0.5\",\n \"Accept-Encoding\": \"gzip, deflate\",\n }\n\n response = requests.request('GET',url, headers = headers)\n print(response.text)\n res = BeautifulSoup(response.text)\n # print(res.find_all(re.compile(\"\\d.\\d.\\d.\\d\")))\n data = []\n table = res.find('table', id=\"ip_list\")\n for row in 
table.findAll(\"tr\"):\n cells = row.findAll(\"td\")\n tmp = []\n for item in cells:\n tmp.append(item.find(text=True))\n try:\n tmp2 = tmp[1:2][0]\n tmp3 = tmp[2:3][0]\n tmp4 = tmp[5:6][0]\n data.append({tmp4: tmp2 + \":\" + tmp3})\n proxy.write( tmp4 + \":\" + tmp2 + \":\" + tmp3 + \"\\n\")\n except Exception as e:\n pass\n\n print(table)\n print(type('table'))\n print(type(table.findAll(\"tr\")))\n\n print(data)\n proxy_list.write(str(data))\n\n\n\n","sub_path":"attack/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118863439","text":"from cec2019comp100digit import cec2019comp100digit\r\nfrom functions import *\r\n\r\n\r\nclass cec2019:\r\n\r\n num_of_func = 10\r\n\r\n def __init__(self, func_id):\r\n\r\n self.bench = cec2019comp100digit\r\n self.func_id = func_id\r\n if (self.func_id == 1):\r\n self.max = 8192\r\n self.min = -8192\r\n self.D = 9\r\n\r\n elif (self.func_id == 2):\r\n self.max = 16384\r\n self.min = -16384\r\n self.D = 16\r\n elif (self.func_id == 3):\r\n self.max = 4\r\n self.min = -4\r\n self.D = 18\r\n elif (self.func_id >= 4 and self.func_id <= 10):\r\n self.max = 100\r\n self.min = -100\r\n self.D = 10\r\n self.MAXFES = self.D * 10000\r\n self.N = 50\r\n self.bench.init(self.func_id, self.D)\r\n\r\n\r\n def get_Parameters(self):\r\n map = {'MAX':self.max, 'MIN':self.min, 'N':self.N, 'D':self.D, 'MAXFES':self.MAXFES}\r\n return map\r\n\r\n def func(self, x):\r\n return self.bench.eval(x)\r\n\r\n def end(self):\r\n self.bench.end()","sub_path":"example code/cec2019.py","file_name":"cec2019.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"194113459","text":"from native_tags.decorators import function\n\ndef get_func(func_name, op=True):\n import operator\n from native_tags.registry import register\n try:\n return register['function'][func_name]\n except KeyError:\n pass\n if func_name in __builtins__:\n return __builtins__[func_name]\n elif hasattr(operator, func_name):\n return getattr(operator, func_name)\n return lambda: None\n \ndef do_map(func_name, *sequence):\n \"\"\"\n Return a list of the results of applying the function to the items of\n the argument sequence(s). \n \n Functions may be registered with ``native_tags`` \n or can be ``builtins`` or from the ``operator`` module\n \n If more than one sequence is given, the\n function is called with an argument list consisting of the corresponding\n item of each sequence, substituting None for missing values when not all\n sequences have the same length. If the function is None, return a list of\n the items of the sequence (or a list of tuples if more than one sequence).\n\n Syntax::\n \n {% map [function] [sequence] %} \n {% map [function] [item1 item2 ...] 
%}\n\n For example::\n \n {% map sha1 hello world %}\n \n calculates::\n \n [sha1(hello), sha1(world)]\n\n \"\"\"\n\n if len(sequence)==1:\n sequence = sequence[0]\n return map(get_func(func_name, False), sequence)\ndo_map = function(do_map, name='map')\ndo_map.test = {'args':('ord','wtf'),'result':[119, 116, 102]}\n\ndef do_reduce(func_name, *sequence):\n \"\"\"\n Apply a function of two arguments cumulatively to the items of a sequence,\n from left to right, so as to reduce the sequence to a single value.\n \n Functions may be registered with ``native_tags`` \n or can be ``builtins`` or from the ``operator`` module\n \n Syntax::\n \n {% reduce [function] [sequence] %} \n {% reduce [function] [item1 item2 ...] %}\n \n For example::\n \n {% reduce add 1 2 3 4 5 %}\n \n calculates::\n \n ((((1+2)+3)+4)+5) = 15\n \"\"\"\n if len(sequence)==1:\n sequence = sequence[0]\n return reduce(get_func(func_name), sequence)\ndo_reduce = function(do_reduce, name='reduce')\ndo_reduce.test = {'args':('add',1,2,3,4,5),'result':15}\n","sub_path":"native_tags/contrib/mapreduce.py","file_name":"mapreduce.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529794088","text":"import skil_client\nfrom .base import Resource\n\n\nclass EMR(Resource):\n \"\"\"EMR\n\n AWS Elastic Map Reduce compute resource\n\n # Arguments:\n skil: `Skil` server instance\n name: Name of the resource\n region: AWS region of the EMR cluster\n credential_uri: path to credential file\n cluster_id: ID of the EMR cluster\n resource_id: optional resource ID to retrieve an existing resource\n create: boolean, for internal use only. whether to create a new resource or not\n \"\"\"\n # TODO: if cluster_id is None, spin up a cluster and retrieve id (requires work in SKIL core)\n # TODO: can we hide setting credentials? i.e. can these be put into a\n # little config file (similar to what we do in pydl4j?).\n\n def __init__(self, skil, name, region, credential_uri, cluster_id=None,\n resource_id=None, create=True):\n\n super(EMR, self).__init__(skil)\n self.name = name\n self.region = region\n self.credential_uri = credential_uri\n self.cluster_id = cluster_id\n self.resource_id = resource_id\n\n if create:\n resource_response = self.skil.api.add_resource(skil_client.AddResourceRequest(\n resource_name=self.name,\n resource_details=skil_client.EMRResourceDetails(\n cluster_id=self.cluster_id,\n region=self.region\n ),\n credential_uri=self.credential_uri,\n type=\"COMPUTE\",\n sub_type=\"EMR\")\n )\n self.resource_id = resource_response.get(\"resourceId\")\n else:\n if resource_id is None:\n raise ValueError(\n 'If create is False you need to provide a valid resource_id')\n\n\nclass DataProc(Resource):\n \"\"\"DataProc\n\n Google cloud engine DataProc compute resource\n\n # Arguments:\n skil: `Skil` server instance\n name: Resource name\n project_id: GCE project ID\n region: GCE region\n spark_cluster_name: DataProc cluster name\n credential_uri: path to credential file\n resource_id: optional resource ID to retrieve an existing resource\n create: boolean, for internal use only. 
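[editor's note] The two template tags above reduce to plain map/reduce calls once the function name is resolved. A standalone Python 3 sketch of that resolution order (the native_tags registry lookup is omitted); note that importing the builtins module is more portable than testing __builtins__, which is a dict only inside __main__:

import builtins
import operator
from functools import reduce

def get_func(name):
    # resolution order mirrors the tags above, minus the registry step
    if hasattr(builtins, name):
        return getattr(builtins, name)
    if hasattr(operator, name):
        return getattr(operator, name)
    raise LookupError(name)

print(list(map(get_func("ord"), "wtf")))         # [119, 116, 102]
print(reduce(get_func("add"), [1, 2, 3, 4, 5]))  # 15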
whether to create a new resource or not\n \"\"\"\n\n def __init__(self, skil, name, project_id, region, spark_cluster_name, credential_uri,\n resource_id=None, create=True):\n super(DataProc, self).__init__(skil)\n self.name = name\n self.project_id = project_id\n self.region = region\n self.credential_uri = credential_uri\n self.cluster_name = spark_cluster_name\n self.resource_id = resource_id\n if create:\n resource_response = self.skil.api.add_resource(skil_client.AddResourceRequest(\n resource_name=self.name,\n resource_details=skil_client.DataProcResourceDetails(\n project_id=self.project_id,\n region=self.region,\n spark_cluster_name=self.cluster_name\n ),\n credential_uri=self.credential_uri,\n type=\"COMPUTE\",\n sub_type=\"DataProc\")\n )\n self.resource_id = resource_response.get(\"resourceId\")\n else:\n if resource_id is None:\n raise ValueError(\n 'If create is False you need to provide a valid resource_id')\n\n\nclass HDInsight(Resource):\n \"\"\"HDInsight\n\n Azure HDInsight compute resource.\n\n # Arguments:\n skil: `Skil` server instance\n name: Resource name\n subscription_id: Azure subscription ID\n resource_group_name: Azure resource group name\n cluster_name: HDInsight cluster name\n credential_uri: path to credential file\n resource_id: optional resource ID to retrieve an existing resource\n create: boolean, for internal use only. whether to create a new resource or not\n \"\"\"\n\n def __init__(self, skil, name, subscription_id, resource_group_name, cluster_name, credential_uri,\n resource_id=None, create=True):\n super(HDInsight, self).__init__(skil)\n self.name = name\n self.subscription_id = subscription_id\n self.resource_group_name = resource_group_name\n self.cluster_name = cluster_name\n self.credential_uri = credential_uri\n self.resource_id = resource_id\n\n if create:\n resource_response = self.skil.api.add_resource(skil_client.AddResourceRequest(\n resource_name=self.name,\n resource_details=skil_client.HDInsightResourceDetails(\n subscription_id=self.subscription_id,\n resource_group_name=self.resource_group_name,\n cluster_name=self.cluster_name\n ),\n credential_uri=self.credential_uri,\n type=\"COMPUTE\",\n sub_type=\"HDInsight\")\n )\n self.resource_id = resource_response.get(\"resourceId\")\n else:\n if resource_id is None:\n raise ValueError(\n 'If create is False you need to provide a valid resource_id')\n\n\nclass YARN(Resource):\n \"\"\"YARN\n YARN compute resource for local Spark computation on YARN.\n # Arguments:\n skil: `Skil` server instance\n name: Resource name\n local_spark_home: full path to local Spark binary\n resource_id: optional resource ID to retrieve an existing resource\n create: boolean, for internal use only. 
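[editor's note] EMR, DataProc, HDInsight and YARN in the record above all repeat the same create-or-validate branch. A self-contained miniature of how that branch could live once in a shared base class; FakeApi and the dict request are illustrative stubs, not the skil_client API:

class FakeApi:
    # stand-in for the skil_client add_resource call, for illustration only
    def add_resource(self, request):
        return {"resourceId": "r-123"}

class ComputeResource:
    SUB_TYPE = None  # subclasses set e.g. "EMR"

    def __init__(self, api, name, details, resource_id=None, create=True):
        self.api, self.name = api, name
        if create:
            response = api.add_resource(
                {"name": name, "details": details, "sub_type": self.SUB_TYPE})
            self.resource_id = response["resourceId"]
        elif resource_id is None:
            raise ValueError(
                "If create is False you need to provide a valid resource_id")
        else:
            self.resource_id = resource_id

class EMRLike(ComputeResource):
    SUB_TYPE = "EMR"

print(EMRLike(FakeApi(), "my-cluster", {"region": "us-east-1"}).resource_id)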
whether to create a new resource or not\n \"\"\"\n\n def __init__(self, skil, name, local_spark_home, credential_uri,\n resource_id=None, create=True):\n super(YARN, self).__init__(skil)\n self.name = name\n self.local_spark_home = local_spark_home\n self.credential_uri = credential_uri\n self.resource_id = resource_id\n\n if create:\n resource_response = self.skil.api.add_resource(skil_client.AddResourceRequest(\n resource_name=self.name,\n resource_details=skil_client.YARNResourceDetails(\n local_spark_home=self.local_spark_home\n ),\n credential_uri=self.credential_uri,\n type=\"COMPUTE\",\n sub_type=\"YARN\")\n )\n self.resource_id = resource_response.get(\"resourceId\")\n else:\n if resource_id is None:\n raise ValueError(\n 'If create is False you need to provide a valid resource_id')\n","sub_path":"skil/resources/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":6614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457950157","text":"\"\"\"Simultaneous fit to sign(PDF)*sign(FT_VAR) to measure wrong-tag fraction\"\"\"\nimport typing\n\nimport ROOT\n\nFILENAME = (\n \"/ghi/fs01/belle2/bdata/users/abudinen/flavorTagging/\"\n \"release-04-00-03TestSviatSel/Belle2_MC12_mixedb02all.root\"\n)\nTREENAME = \"variables\"\nPRECUT = \"isSignal && abs(FBDT_qrCombined) < 1.1\"\nMIX_PROB_TRUE = 0.1871\nmix_prob_true = ROOT.RooConstVar(\"mix_prob_true\", \"mix_prob_true\", MIX_PROB_TRUE)\n\n\ndef fit_for_wrong_tag_fraction(ft_var: str = \"mixFBDT\") -> typing.Tuple[float, float]:\n \"\"\"\n Use RooSimultaneous pdf to fit to product of PDG and flavour tagger variable ft_var\n :param ft_var: Flavour tagger variable you want to assess using fit\n :return: (measured value, error) of wrong-tag fraction\n \"\"\"\n\n def make_name(name: str) -> typing.Tuple[str, str]:\n \"\"\"Ensure a unique name for ROOT variables, in case this function is run in a loop during a single session.\"\"\"\n name = f\"{name}_{ft_var}\"\n return name, name\n\n # Independent variable\n x = ROOT.RooRealVar(ft_var, ft_var, -2, 2)\n\n # Also need to create RooAbsReals for the cut variables, namely isSignal and FBDT_qrCombined\n var_isSignal = ROOT.RooRealVar(\"isSignal\", \"isSignal\", -100, 100)\n var_FBDT_qrCombined = ROOT.RooRealVar(\n \"FBDT_qrCombined\", \"FBDT_qrCombined\", -100, 100\n )\n\n # This is what we want to measure using the fit -> must be floating\n start, mini, maxi = (\n 0.25,\n -0.001,\n 1.001,\n ) # Let it go slightly out of sensible limits (sometimes helps stability)\n wrong_tag_fraction = ROOT.RooRealVar(\n *make_name(\"wrong_tag_fraction\"), start, mini, maxi\n )\n\n # Dilution factor is used in both SF and OF PDFs (see next block)\n dilution_formula = \"(1 - 2*@0)*(1 - 2*@1)\" # 0: wrong-tag fraction, 1: physical B-mixing probability (constant)\n dilution_args = ROOT.RooArgList(wrong_tag_fraction, mix_prob_true)\n dilution = ROOT.RooFormulaVar(\n *make_name(\"dilution\"), dilution_formula, dilution_args\n )\n\n # Formulas for PDFs (SF = same flavour, OF = opposite flavour, referring to flavours of tag- and sig-B mesons)\n prob_SF_formula = \"0.5 * (1 - @0)\"\n prob_OF_formula = \"0.5 * (1 + @0)\"\n pdf_args = ROOT.RooArgList(dilution)\n prob_SF = ROOT.RooGenericPdf(*make_name(\"SF\"), prob_SF_formula, pdf_args)\n prob_OF = ROOT.RooGenericPdf(*make_name(\"OF\"), prob_OF_formula, pdf_args)\n\n # First need to reduce datasets by applying cut, so need RooDataSet with both fit variable and cut variables\n data_args = ROOT.RooArgSet(x, 
var_isSignal, var_FBDT_qrCombined)\n\n def create_category_dataset(name: str, cut: str) -> ROOT.RooDataSet:\n \"\"\"Create a RooDataSet which contains the fitting variable and the variables we need for cuts.\"\"\"\n ds = ROOT.RooDataSet(\n *make_name(name),\n data_args,\n ROOT.RooFit.ImportFromFile(FILENAME, TREENAME),\n ROOT.RooFit.Cut(cut),\n )\n return ds\n\n data_SF_all = create_category_dataset(\n \"data_SF_all\", cut=PRECUT + f\" && {ft_var} > 0\"\n )\n data_OF_all = create_category_dataset(\n \"data_OF_all\", cut=PRECUT + f\" && {ft_var} < 0\"\n )\n\n # Now create subset RooDataSet with just the fit variable, which can be used for RooSimultaneous\n x_set = ROOT.RooArgSet(x)\n data_sf = ROOT.RooDataSet(*make_name(\"sf\"), data_SF_all, x_set)\n data_of = ROOT.RooDataSet(*make_name(\"of\"), data_OF_all, x_set)\n\n # Create combined dataset / set-up for RooSimultaneous\n category = ROOT.RooCategory(*make_name(\"category\"))\n # Categories are linked to PDFs in 1->1 mapping (1=SF, -1=OF)\n pdfs = {\n \"1\": prob_SF,\n \"-1\": prob_OF,\n }\n for flavour in pdfs:\n category.defineType(flavour)\n\n comb_data = ROOT.RooDataSet(\n *make_name(\"comb\"),\n x_set,\n ROOT.RooFit.Index(category),\n ROOT.RooFit.Import(\"1\", data_sf),\n ROOT.RooFit.Import(\"-1\", data_of),\n )\n flavor_mix_prob = ROOT.RooSimultaneous(*make_name(\"flavor_mix_prob\"), category)\n for flavour, pdf in pdfs.items():\n flavor_mix_prob.addPdf(pdf, flavour)\n\n # Perform fit\n result = flavor_mix_prob.fitTo(\n comb_data,\n ROOT.RooFit.Minos(False),\n ROOT.RooFit.Extended(False),\n ROOT.RooFit.Save(True),\n )\n result.Print()\n value = wrong_tag_fraction.getVal()\n error = wrong_tag_fraction.getError()\n return value, error\n\n\nif __name__ == '__main__':\n import pandas as pd\n\n results = {}\n\n for var in \"mixFANN mixDNN mixFBDT\".split():\n value, error = fit_for_wrong_tag_fraction(var)\n results[var] = {'value': value, 'error': error}\n\n df = pd.DataFrame(results)\n print(df)\n","sub_path":"flavour_tagging/fit_for_wrong_tag_fraction.py","file_name":"fit_for_wrong_tag_fraction.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"109134891","text":"# Author: Simon Blanke\n# Email: simon.blanke@yahoo.com\n# License: MIT License\n\nimport time\nimport numbers\n\n\ndef is_numeric(variable):\n return isinstance(variable, numbers.Number)\n\n\nclass Model:\n def __init__(self, func_, nth_process, _main_args_):\n self.func_ = func_\n self.nth_process = nth_process\n self.X = _main_args_.X\n self.y = _main_args_.y\n\n def train_model(self, para_dict):\n start_time = time.time()\n results = self.func_(para_dict, self.X, self.y)\n eval_time = time.time() - start_time\n\n if isinstance(results, tuple):\n self.n_results = len(results)\n\n score = results[0]\n self.rest = results[1]\n else:\n self.n_results = 1\n score = results\n self.rest = None\n\n if is_numeric(score):\n return score, eval_time\n else:\n print(\"Error: model function must return numeric variable\")\n","sub_path":"hyperactive/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577702790","text":"#!/usr/bin/python3.4\n\"\"\"\nTODO\n\"\"\"\nimport testManager\nimport xml.etree.ElementTree as ET\nimport re\nimport os.path\n\n__author__ = 'heliosantos99@gmail.com (Helio Santos)'\n\n\ndef parse_test_plan(file_path, scripts_folder):\n tree = 
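[editor's note] The train_model pattern in the hyperactive record above (time the objective call, unpack a scalar-or-tuple result, reject non-numeric scores) in a standalone sketch; time.perf_counter is used here because it is better suited to interval timing than time.time:

import numbers
import time

def evaluate(objective, *args):
    start = time.perf_counter()
    results = objective(*args)
    elapsed = time.perf_counter() - start
    if isinstance(results, tuple):
        score, rest = results[0], results[1:]
    else:
        score, rest = results, None
    if not isinstance(score, numbers.Number):
        raise TypeError("objective must return a numeric score")
    return score, rest, elapsed

score, rest, elapsed = evaluate(lambda x: (x * x, "aux"), 3)
print(score, rest)  # 9 ('aux',)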
ET.parse(file_path)\n root = tree.getroot()\n allNodes = []\n for child in root:\n allNodes += parse_test_plan_node(child)\n\n testPlan = testManager.LoopNode(1, allNodes)\n add_script_to_test(testPlan, scripts_folder)\n\n return testPlan\n\n\ndef parse_test_plan_node(node, params={}):\n if node.tag == 'ApiParameters':\n params = parse_api_parameters(node, params)\n\n children = []\n for child in node:\n children += parse_test_plan_node(child, params)\n\n if node.tag == 'ApiParameters':\n return children\n\n elif node.tag == 'Loop':\n return [testManager.LoopNode(node.attrib.get('num'), children)]\n\n elif node.tag == 'Test':\n test = testManager.TestNode(node.attrib.get('name'),\n node.attrib.get('scriptFile'))\n firstStep = node.attrib.get('firstStep')\n if firstStep is not None:\n test.firstStep = int(firstStep)\n lastStep = node.attrib.get('lastStep')\n if lastStep is not None:\n test.lastStep = int(lastStep)\n if len(node) == 1:\n params = dict(params, **parse_api_parameters(node[0], params))\n else:\n params = dict(params)\n test.apiParameters = params\n\n return [test]\n\n else:\n raise Exception(\"You are doing it wrong. (tag = %s)\" % node.tag)\n\n\ndef parse_api_parameters(node, params={}):\n if node.tag == 'ApiParameters':\n return dict(params, **node.attrib)\n raise Exception(\"You are doing it wrong. (tag = %s)\" % node.tag)\n\n\ndef add_script_to_test(test, scripts_folder):\n if test.isTestNode:\n parse_test_steps(test, scripts_folder)\n else:\n for child in test.children:\n add_script_to_test(child, scripts_folder)\n\n\ndef parse_test_steps(test, scripts_folder):\n allSteps = []\n with open(os.path.join(scripts_folder, test.script_file)) as script_file:\n script = script_file.read()\n for match in re.finditer(\n r'//>\\s?Step#(?P\\d+)'\n ' (?P.*?)(?=//> Step|$)',\n script, re.IGNORECASE | re.DOTALL):\n\n step = {}\n step['name'] = match.group('stepName')\n step['num'] = int(match.group('stepNumber'))\n step['script'] = match.group(0)\n allSteps.append(step)\n\n executedSteps = []\n executedStepsString = ''\n for step in allSteps:\n if (test.firstStep is None or test.firstStep <= step['num']) and (\n test.lastStep is None or test.lastStep >= step['num']):\n executedSteps.append(step)\n executedStepsString += step['script']\n\n test.set_script(executedStepsString)\n customMetrics = parse_custom_metrics(executedSteps[-1]['script'])\n if customMetrics is not None:\n test.set_custom_metrics(customMetrics)\n\n\ndef parse_custom_metrics(stepScript):\n matches = re.finditer(\n \"(?<=//> CustomMetrics\\n)(?:[^\\n]*//>[^\\n]*\\n?)*\",\n stepScript, re.IGNORECASE | re.DOTALL)\n\n customMetrics = next(matches, None)\n\n if customMetrics is None:\n return None\n\n customMetrics = re.sub(\n r\"//>\", \"\", customMetrics.group(0), 0, re.IGNORECASE | re.DOTALL)\n\n if next(matches, None) is not None:\n raise Exception('Each step must contain not more '\n 'than one CustomMetrics block')\n\n return customMetrics\n","sub_path":"testManagerParser.py","file_name":"testManagerParser.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239821141","text":"\"\"\"\nGiven a number n, count the total number of digits required to write all numbers from 1 to n.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. 
Each test case contains an integer n.\n\nOutput:\nPrint the total number of digits required to write all numbers from 1 to n.\n\nConstraints:\n1<=T<=10^5\n1<=n<=10^5\n\nExample:\nInput:\n2\n13\n4\n\nOutput:\n17\n4\n\"\"\"\n\n\ndef total_digits(n):\n s = 0\n for i in range(0, len(str(n)) - 1):\n s += 9 * 10 ** i * (i + 1)\n a = 10 ** (len(str(n)) - 1) - 1\n s += (n - a) * len(str(n))\n return s\n\n\nif __name__ == '__main__':\n t = int(input())\n for i in range(t):\n n = int(input())\n print(total_digits(n))\n","sub_path":"practice/Basic/total_digits.py","file_name":"total_digits.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57431265","text":"#!/usr/bin/env python3\n\"\"\"\nDefines class Yolo that uses the Yolo v3 algorithm to perform object detection\n\"\"\"\nimport tensorflow.keras as K\nimport numpy as np\n\n\nclass Yolo:\n \"\"\"\n Class that uses Yolo v3 algorithm to perform object detection\n \"\"\"\n\n def __init__(self, model_path, classes_path, class_t, nms_t, anchors):\n \"\"\"\n Yolo class constructor\n \"\"\"\n self.model = K.models.load_model(model_path)\n with open(classes_path, 'r') as f:\n lines = f.readlines()\n self.class_names = []\n for name in lines:\n self.class_names.append(name[:-1])\n self.class_t = class_t\n self.nms_t = nms_t\n self.anchors = anchors\n\n @staticmethod\n def sigmoid(x):\n \"\"\"\n Returns the output after passing through Sigmoid function\n output will be between 0 and 1\n \"\"\"\n return (1. / (1. + np.exp(-x)))\n\n def process_outputs(self, outputs, image_size):\n \"\"\"\n Processes the outputs\n \"\"\"\n boxes = []\n box_confidences = []\n box_class_probs = []\n for i, output in enumerate(outputs):\n anchors = self.anchors[i]\n grid_height, grid_width = output.shape[:2]\n\n t_xy = output[..., :2]\n t_wh = output[..., 2:4]\n\n sigmoid_conf = self.sigmoid(output[..., 4])\n sigmoid_prob = self.sigmoid(output[..., 5:])\n\n box_conf = np.expand_dims(sigmoid_conf, axis=-1)\n box_class_prob = sigmoid_prob\n\n box_confidences.append(box_conf)\n box_class_probs.append(box_class_prob)\n\n b_wh = anchors * np.exp(t_wh)\n b_wh /= self.model.inputs[0].shape.as_list()[1:3]\n\n grid = np.tile(np.indices((grid_width, grid_height)).T,\n anchors.shape[0]).reshape(\n (grid_height, grid_width) + anchors.shape)\n\n b_xy = (self.sigmoid(t_xy) + grid) / [grid_width, grid_height]\n\n b_xy1 = b_xy - (b_wh / 2)\n b_xy2 = b_xy + (b_wh / 2)\n box = np.concatenate((b_xy1, b_xy2), axis=-1)\n box *= np.tile(np.flip(image_size, axis=0), 2)\n\n boxes.append(box)\n return (boxes, box_confidences, box_class_probs)\n","sub_path":"supervised_learning/0x0A-object_detection/1-yolo.py","file_name":"1-yolo.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"54930478","text":"\r\nfrom multiprocessing import Process, Value, Array, Lock, Condition\r\n\r\nimport os\r\nimport random\r\nimport sys\r\nimport time\r\n\r\nclass PingPong :\r\n def __init__ (self) :\r\n self.previous = Value('i', 1, lock=False)\r\n self.available = Value('i', 1, lock=False)\r\n self.verrou = Lock()\r\n self.acces = [Condition(self.verrou), Condition(self.verrou)]\r\n\r\n def acceder (self, rang, side) :\r\n with self.verrou :\r\n while self.previous.value == side or self.available.value==0:\r\n #print('Player', rang, \"Blocked on side \", side)\r\n self.acces[side].wait()\r\n #print('Player', rang, \"Unblocked on side 
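[editor's note] A brute-force cross-check of the closed form in total_digits above, restated so the snippet runs standalone; the loop sums the digit counts of all full-width ranges (9 one-digit numbers, 90 two-digit numbers, ...) and the last term covers the partial top range:

def total_digits(n):
    s = 0
    for i in range(len(str(n)) - 1):
        s += 9 * 10 ** i * (i + 1)
    return s + (n - (10 ** (len(str(n)) - 1) - 1)) * len(str(n))

assert all(total_digits(n) == sum(len(str(k)) for k in range(1, n + 1))
           for n in range(1, 2000))
print(total_digits(13), total_digits(4))  # 17 4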
\", side)\r\n #print('Player', rang, \" has been granted access to \", side)\r\n self.available.value = 0\r\n\r\n def liberer (self, rang, side) :\r\n with self.verrou:\r\n self.available.value = 1\r\n self.previous.value = side\r\n #print('Player', rang, \"Leaving on side \", side)\r\n self.acces[(side+1)%2].notify()\r\n\r\ndef player (rang, side, moniteur) :\r\n time.sleep(.1 + random.random())\r\n print(\"Player\", rang, \"asks\", side)\r\n moniteur.acceder(rang, side)\r\n print(\"Player\", rang, \"enters\", side)\r\n time.sleep(.2 + random.random())\r\n moniteur.liberer(rang, side)\r\n\r\n####\r\n\r\nif __name__ == '__main__' :\r\n\r\n if len(sys.argv) != 2 :\r\n print(\"Usage %s \", sys.argv[0])\r\n sys.exit(1)\r\n\r\n nbPlayers = int(sys.argv[1])\r\n\r\n processes = []\r\n table = PingPong()\r\n\r\n for rang_proc in range(nbPlayers) :\r\n proc = Process(target=player, args=(rang_proc, rang_proc%2, table))\r\n processes.append(proc)\r\n proc.start()\r\n\r\n for rang_proc in range(nbPlayers) :\r\n proc.join()\r\n","sub_path":"M1/S7/parallelisme/Moniteurs/TP1/Lesson example - Ping Pong.py","file_name":"Lesson example - Ping Pong.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"355685846","text":"import random\n\n\nclass Card:\n def __init__(self, suit, rank):\n self.suit = suit\n self.rank = rank\n self.value_str = self.get_value_str(rank)\n self.point_value = self.get_value(rank)\n\n @staticmethod\n def get_value_str(rank):\n if rank == 1:\n return \"A\"\n elif rank == 11:\n return \"J\"\n elif rank == 12:\n return \"Q\"\n elif rank == 13:\n return \"K\"\n elif rank == -1:\n return \"Joker\"\n else:\n return str(rank)\n\n @staticmethod\n def get_value(rank):\n ## mainly for blackjack or canasta\n return rank\n\n @staticmethod\n def get_suit(suit):\n if suit == 0:\n return \"Spades\"\n elif suit == 1:\n return \"Clubs\"\n elif suit == 2:\n return \"Hearts\"\n elif suit == 3:\n return \"Diamonds\"\n return \"\"\n\n def is_ace(self):\n return self.rank == 1\n\n def is_joker(self):\n return self.rank == -1\n\n def __str__(self):\n if self.suit < 0:\n return self.value_str\n return self.value_str + \" of \" + self.get_suit(self.suit)\n\n def __eq__(self, other):\n if isinstance(other, Card):\n return self.suit == other.suit and self.rank == other.rank\n return NotImplemented\n\n\nclass Deck:\n CARDS_PER_DECK = 52\n CARDS_PER_DECK_WITH_JOKERS = 54\n\n def __init__(self, number_of_decks, jokers=False):\n self.removed_cards = 0\n number_of_decks = max(number_of_decks, 1) # at least one deck\n self.total_cards = number_of_decks * self.CARDS_PER_DECK_WITH_JOKERS if jokers else self.CARDS_PER_DECK\n self.cards = self.reset_deck(number_of_decks, jokers)\n self.current = iter(self.cards)\n self.shuffle_method = None\n\n @staticmethod\n def reset_deck(num, jokers=False):\n cards = []\n for n in range(num):\n for x in range(1, 14):\n for y in range(4):\n cards.append(Card(y, x))\n if jokers:\n cards.append(Card(-1, -1))\n cards.append(Card(-1, -1))\n\n return cards\n\n # def shuffle(self):\n # self.removed_cards = 0\n # self.current = iter(self.cards)\n #\n # if self.shuffle_method is not None:\n # self.shuffle_method.shuffle(self.cards)\n # else:\n # random.shuffle(self.cards)\n\n def apply_shuffle_method(self, shuffle_method):\n self.shuffle_method = shuffle_method\n\n def draw(self):\n if self.remaining_cards() == 0:\n self.shuffle()\n self.removed_cards += 1\n return next(self.current)\n\n def 
print_deck(self):\n i = 0\n for c in self.cards:\n print(c)\n i += 1\n print(\"total cards: \" + str(i))\n\n def remaining_cards(self):\n return self.total_cards - self.removed_cards\n\n# d = Deck(1, True)\n# d.print_deck()\n","sub_path":"Items/Items.py","file_name":"Items.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615867918","text":"import pickle\nimport gensim\nimport numpy as np\n\n\ndef read2txt(position):\n data = []\n f = open(position)\n line = f.readline()\n while line:\n words = line[:len(line) - 1].split('|')\n data.append(words)\n line = f.readline()\n f.close()\n return data\n\n\nmodel = gensim.models.Word2Vec.load('../data/totalModel')\ndata = read2txt(\"../data/washedTarget.txt\")\ntrainX = []\ntrainY = []\nfor index, row in enumerate(data):\n if index == 0:\n continue\n words = row[0].split(' ')\n tempX = np.zeros(300)\n for word in words:\n if not word:\n tempX += model['fine']\n else:\n tempX += model[word]\n tempX /= len(words)\n trainX.append(tempX)\n # tempY = [int(row[1]), int(row[2]), int(row[3]), int(row[4]), int(row[5]), int(row[6])]\n # trainY.append(tempY)\n\noutput = open('data.pkl', 'wb')\npickle.dump(trainX, output, -1)\n# pickle.dump(trainY, output)\n#output.close()\n","sub_path":"AML/codes/vachor/wordVec2sentence.py","file_name":"wordVec2sentence.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"625382005","text":"from __future__ import print_function\n\nimport BaseHTTPServer\nimport getpass\nimport logging\nimport urllib2\n\nfrom mrproxy import UserProxyHandler\n\n\n# Workaround for https://github.com/gmjosack/mrproxy/issues/2\nclass ProxyHandler(UserProxyHandler):\n def do_request(self, request):\n try:\n url = urllib2.urlopen(request)\n code = url.getcode()\n headers = str(url.info())\n data = url.read()\n except urllib2.HTTPError as err:\n code = err.getcode()\n headers = str(err.info())\n data = err.read()\n except urllib2.URLError as err:\n code = 503\n headers = str(err)\n data = \"503 Service Unavailable: %s\\n\" % err\n\n self.send_response(code)\n self.wfile.write(headers)\n self.end_headers()\n self.wfile.write(data)\n\n\ndef build_user_proxy_server(username, backend_port, listen_host, listen_port):\n class ServerArgs(object):\n def __init__(self, backend_port, username):\n self.backend_port = backend_port\n self.header = [\"X-Grouper-User: %s\" % username]\n\n server = BaseHTTPServer.HTTPServer((listen_host, listen_port), ProxyHandler)\n server.args = ServerArgs(backend_port, username)\n\n return server\n\n\ndef user_proxy_command(args):\n username = args.username\n if username is None:\n username = getpass.getuser()\n logging.debug(\"No username provided, using (%s)\", username)\n\n server = build_user_proxy_server(\n args.username, args.backend_port, args.listen_host, args.listen_port\n )\n try:\n logging.info(\n \"Starting user_proxy on host (%s) and port (%s) with user (%s)\",\n args.listen_host,\n args.listen_port,\n username,\n )\n server.serve_forever()\n except KeyboardInterrupt:\n print(\"Bye!\")\n\n\ndef add_parser(subparsers):\n user_proxy_parser = subparsers.add_parser(\n \"user_proxy\", help=\"Start a development reverse proxy.\"\n )\n user_proxy_parser.set_defaults(func=user_proxy_command)\n user_proxy_parser.add_argument(\"--listen-host\", default=\"localhost\", help=\"Host to listen on.\")\n user_proxy_parser.add_argument(\n 
\"-p\", \"--listen-port\", default=8888, type=int, help=\"Port to listen on.\"\n )\n user_proxy_parser.add_argument(\n \"-P\", \"--backend-port\", default=8989, type=int, help=\"Port to proxy to.\"\n )\n user_proxy_parser.add_argument(\"username\", nargs=\"?\", default=None)\n","sub_path":"grouper/ctl/user_proxy.py","file_name":"user_proxy.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"625569505","text":"#----------------------------------------------\n# -*- encoding=utf-8 -*- #\n# __author__:'焉知飞鱼' #\n# CreateTime: #\n# 2019/9/23 14:41 #\n# #\n# 天下风云出我辈, #\n# 一入江湖岁月催。 #\n# 皇图霸业谈笑中, #\n# 不胜人生一场醉。 #\n#----------------------------------------------\n# 连续动作,输出的是动作的概率分布,正太分布,然后从该分布中采样出一个动作。\n# https://github.com/zhijie-ai/tensorflow_practice/blob/master/RL/Basic-PPO-Demo/simple-PPO.py\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gym\n\nEP_MAX = 200\nEP_LEN = 200\nGAMMA = 0.9\nA_LR = 0.0001\nC_LR = 0.0002\nBATCH = 32\nA_UPDATE_STEPS = 10\nC_UPDATE_STEPS = 10\nS_DIM, A_DIM = 3, 1\nMETHOD = [\n dict(name='kl_pen', kl_target=0.01, lam=0.5), # KL penalty\n dict(name='clip', epsilon=0.2), # Clipped surrogate objective\n][1]\n\nclass PPO():\n def __init__(self):\n self.sess = tf.Session()\n self.tfs = tf.placeholder(tf.float32,[None,S_DIM],'state')\n\n # critic\n with tf.variable_scope('critic'):\n l1 = tf.layers.dense(self.tfs,100,tf.nn.relu)\n self.v = tf.layers.dense(l1,1) # state-value\n self.tfdc_r = tf.placeholder(tf.float32,[None,1],'discounted_r')\n self.advantage = self.tfdc_r-self.v\n self.closs = tf.reduce_mean(tf.square(self.advantage))\n self.ctrin_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)\n\n #actor\n pi,pi_params = self._build_anet('pi',trainable=True)\n oldpi,oldpi_params = self._build_anet('oldpi',trainable=False)# 不参与训练\n with tf.variable_scope('sample_action'):\n self.sample_op = tf.squeeze(pi.sample(1),axis=0)\n with tf.variable_scope('update_oldpi'):\n self.update_oldpi_op = [oldp.assign(p) for p,oldp in zip(pi_params,oldpi_params)]\n\n self.tfa = tf.placeholder(tf.float32,[None,A_DIM],'action')\n self.tfadv = tf.placeholder(tf.float32,[None,1],'advantage')\n with tf.variable_scope('loss'):\n with tf.variable_scope('surrogate'):\n ratio = pi.prob(self.tfa)/oldpi.prob(self.tfa)#概率密度函数\n surr = ratio*self.tfadv\n\n if METHOD['name']=='kl_pen':\n self.tflam = tf.placeholder(tf.float32,None,'lambda')\n kl = tf.distributions.kl_divergence(oldpi,pi)\n self.kl_mean = tf.reduce_mean(kl)\n self.aloss = -tf.reduce_mean(surr-self.tflam*kl)\n else:\n self.aloss = -tf.reduce_mean(\n tf.minimum(surr,\n tf.clip_by_value(ratio,\n 1-METHOD['epsilon'],\n 1. 
+ METHOD['epsilon']) * self.tfadv))\n\n with tf.variable_scope('atrain'):\n self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)\n\n tf.summary.FileWriter('log/',self.sess.graph)\n\n self.sess.run(tf.global_variables_initializer())\n\n def update(self,s,a,r):\n self.sess.run(self.update_oldpi_op)\n adv = self.sess.run(self.advantage,{self.tfs:s,self.tfdc_r:r})# 得到advantage value\n\n # update actor\n if METHOD['name'] =='kl_pen':\n for _ in range(A_UPDATE_STEPS):\n _,kl = self.sess.run([self.atrain_op,self.kl_mean],\n {self.tfs:s,self.tfa:a,\n self.tfadv:adv,self.tflam:METHOD['lam']})\n if kl>4*METHOD['kl_target']:\n break\n elif kl < METHOD['kl_target']/1.5:# adaptive lambda,this is in OpenAi's paper\n METHOD['lam']/= 2\n elif kl> METHOD['kl_target'] *1.5:\n METHOD['lam']*=2\n METHOD['lam'] = np.clip(METHOD['lam'],1e-4,10)#sometimes explode,this clipping is my solution\n\n else: # clipping method,find this is better(OpenAI's paper)\n [self.sess.run(self.atrain_op,{self.tfs:s,self.tfa:a,self.tfadv:adv})\n for _ in range(A_UPDATE_STEPS)]\n\n #update critic\n [self.sess.run(self.ctrin_op,{self.tfs:s,self.tfdc_r:r}) for _ in range(C_UPDATE_STEPS)]\n\n def _build_anet(self,name,trainable):\n with tf.variable_scope(name):\n l1 = tf.layers.dense(self.tfs,100,tf.nn.relu,trainable=trainable)\n mu = 2*tf.layers.dense(l1,A_DIM,tf.nn.tanh,trainable=trainable)\n sigma = tf.layers.dense(l1,A_DIM,tf.nn.softplus,trainable=trainable)\n norm_dist = tf.distributions.Normal(loc=mu,scale=sigma) # 一个正太分布\n params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope=name)\n return norm_dist,params\n\n def choose_action(self,s):\n s = s[np.newaxis,:]\n a = self.sess.run(self.sample_op,{self.tfs:s})[0]\n return np.clip(a,-2,2)\n\n def get_v(self,s):\n if s.ndim <2:s=s[np.newaxis,:]\n return self.sess.run(self.v,{self.tfs:s})[0,0]\n\n\n\nenv = gym.make('Pendulum-v0').unwrapped\nprint(env.action_space)#Box(1,)\nppo = PPO()\nall_ep_r = []\n\nfor ep in range(EP_MAX):\n s = env.reset()\n buffer_s,buffer_a,buffer_r = [],[],[]\n ep_r = 0\n for t in range(EP_LEN): # in one episode\n # env.render()\n a = ppo.choose_action(s) # 根据正太分布,选择一个action\n s_,r,done,_ = env.step(a)#done不会取到false\n buffer_s.append(s)\n buffer_a.append(a)\n buffer_r.append((r+8)/8) # nomalized reward,find to be useful\n s = s_\n ep_r += r\n\n\n # update ppo\n if(t+1)%BATCH ==0 or t == EP_LEN-1:\n v_s_ = ppo.get_v(s_)#下一个状态的值\n discounted_r = []\n for r in buffer_r[::-1]:\n v_s_ = r+GAMMA*v_s_\n discounted_r.append(v_s_) # v(s)=r+gamma*v(s+1)\n discounted_r.reverse()\n\n bs,ba,br = np.vstack(buffer_s),np.vstack(buffer_a),np.array(discounted_r)[:,np.newaxis]\n buffer_s,buffer_a,buffer_r = [],[],[]\n ppo.update(bs,ba,br)\n if ep == 0:all_ep_r.append(ep_r)\n else: all_ep_r.append(all_ep_r[-1]*0.9+ep_r*0.1)\n print('Ep:%i'%ep,\n '|Ep_r:%i'%ep_r,\n ('|Lam:%.4f'%METHOD['lam']) if METHOD['name']=='kl_pen' else '')\n\nplt.plot(np.arange(len(all_ep_r)),all_ep_r)\nplt.xlabel('Episode')\nplt.ylabel('Moving averaged episode reward')\nplt.show()\n","sub_path":"liujianping/simple-PPO.py","file_name":"simple-PPO.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"525670279","text":"# region -------------Info------------\n# Name: http file server\n# Version: 1.0\n# By: Or Abramovich\n# endregion -------------Info------------\n\n# region -------------Imports---------\nimport ConnectionHandler\n# endregion -------------Imports---------\n\n# region 
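[editor's note] A NumPy illustration of the clipped surrogate objective used in the PPO record above; the min makes the objective pessimistic, so pushing the probability ratio outside [1-eps, 1+eps] yields no further improvement and updates stay near the old policy:

import numpy as np

def clipped_surrogate(ratio, adv, eps=0.2):
    # L = -mean(min(r * A, clip(r, 1 - eps, 1 + eps) * A))
    return -np.mean(np.minimum(ratio * adv,
                               np.clip(ratio, 1.0 - eps, 1.0 + eps) * adv))

ratio = np.array([0.5, 1.0, 1.6])   # pi_new / pi_old per sample
adv   = np.array([1.0, -1.0, 2.0])  # advantage estimates
print(clipped_surrogate(ratio, adv))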
-------------Constants-------\n\n# endregion -------------Constants-------\n\n# region -------------Methods---------\n\n# endregion -------------Methods---------\n\n# region -------------Main------------\n\n\ndef main():\n connection_handler = ConnectionHandler.ConnectionHandler()\n while True:\n connection_handler.handle_connections()\n\n\nif __name__ == '__main__':\n main()\n# endregion -------------Main------------\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364817843","text":"# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom recipe_engine import recipe_api\n\nclass GomaApi(recipe_api.RecipeApi):\n \"\"\"GomaApi contains helper functions for using goma.\"\"\"\n\n def update_goma_canary(self):\n \"\"\"Returns a step for updating goma canary.\"\"\"\n # for git checkout, should use @refs/heads/master to use head.\n head = 'refs/heads/master'\n self.m.gclient('update goma canary',\n ['sync', '--verbose', '--force',\n '--revision', 'build/goma@%s' % head],\n cwd=self.m.path['build'])\n","sub_path":"scripts/slave/recipe_modules/goma/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"208640352","text":"import torch\nimport numpy as np\nfrom os import listdir\nimport torchvision.transforms as transforms\nimport os\nfrom PIL import Image\nimport pandas as pd\n\ntransform_train = transforms.Compose([\n transforms.ToTensor()\n])\nclass FlorinDataset():\n def __init__(self, file_root_blur,file_root_sharp,transform_train):\n\n\n self.file_root_blur=file_root_blur\n self.file_root_sharp=file_root_sharp\n self.data_input= pd.read_csv(file_root_blur,sep=' , ',header=None).values\n self.data_target= pd.read_csv(file_root_sharp,sep=' , ',header=None).values\n self.transform_train=transform_train\n\n\n\n def __len__(self):\n return len(self.data_input)\n\n\n def __getitem__(self, index):\n for i in range(0,5):\n if i==0:\n inputs=Image.open(self.data_input[index][i])\n inputs=transforms.ToTensor()(inputs)\n else:\n input = Image.open(self.data_input[index][i])\n input = transforms.ToTensor()(input)\n inputs = torch.cat((inputs, input), 0)\n if i==2 :\n targets=Image.open(self.data_target[index][i])\n targets=transforms.ToTensor()(targets)\n\n return inputs,targets\n\n\n","sub_path":"FlorinDataset.py","file_name":"FlorinDataset.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"270202568","text":"import numpy as np\r\n\r\nclass Node:\r\n def __init__(self, vector, train_data, pos, neg, default_category):\r\n \"\"\"\r\n :param vector: Is the vector with the properties.\r\n :param train_data: A 2d numpy array with shape ( len(reviews), len(vocabulary)+1 ) where each row represents one \r\n review. Every review is a binary numpy array. 
The first item indicates the category of the \r\n review and the following items whether or not this review contains a specific index (word).\r\n :param pos: A counter that contains the size of the positive reviews\r\n :param neg: A counter that contains the size of the negative reviews\r\n :param default_category: A string that is either \"positive\" either \"negative\"(In the first call from the main, \r\n default_category is \"positive\")\r\n \"\"\"\r\n self.left = None\r\n self.right = None\r\n self.vector = vector\r\n self.train_data = train_data\r\n self.pos = pos\r\n self.neg = neg\r\n self.default_category = default_category\r\n \r\nclass ID3:\r\n def insert(self, vector, train_data, pos, neg, default_category, i):\r\n \"\"\"\r\n :param vector: Is the vector with the properties.\r\n :param train_data: A 2d numpy array with shape ( len(reviews), len(vocabulary)+1 ) where each row represents one \r\n review. Every review is a binary numpy array. The first item indicates the category of the \r\n review and the following items whether or not this review contains a specific index (word).\r\n :param pos: A counter that contains the size of the positive reviews\r\n :param neg: A counter that contains the size of the negative reviews\r\n :param default_category: A string that is either \"positive\" either \"negative\"(In the first call from the main, \r\n default_category is \"positive\")\r\n :param i: \"i\" is an extra parameter that is used to check if each one of the reviews (each_train) that I examine now \r\n contains the property (which is stored in the vector). len(review) = len(vector) + 1, review[0] = 1=>positive \r\n reviews[0] = 0=>negative \r\n (In the first call from the main, i = 1). \r\n So, in each reconstruction, I increase the variable i to check the next property.\r\n I only check if review[i] == 1 and I am not using the vector, because the train_data length depends on vector\r\n and also train_data constructed after the vector's construction.\r\n E.g. If vector contains 100 words(probably the indexes of 100 words), then each_train in train_data will be \r\n a vectored_review that has 101 values. First value to separate if train is positive or negative, second value \r\n to separate if the word with the biggest IG(which is the first in vector) is in the review(each_train),\r\n third etc. That's also the reason that i am not taking vector[0] to see if the word is in the review\r\n (each_train). Because I already know by accessing each review if the review(each_train) contains the word that\r\n described in the vector. 
I just use a vector as a counter whose length is reduced by one in each division.\r\n \"\"\"\r\n node = Node(vector, train_data, pos, neg, default_category)\r\n \r\n if len(train_data) == 0:\r\n return node\r\n elif pos > 0.95 * len(train_data):\r\n return node\r\n elif neg > 0.95 * len(train_data):\r\n return node\r\n elif len(vector) == 0:\r\n if pos > neg:\r\n return node\r\n elif neg > pos:\r\n return node\r\n else:\r\n vector = np.delete(vector, 0)\r\n left_data = []\r\n right_data = []\r\n left_counter_neg = 0\r\n left_counter_pos = 0\r\n right_counter_neg = 0\r\n right_counter_pos = 0\r\n for each_train in train_data:\r\n if each_train[i] == 1:\r\n left_data.append(each_train)\r\n if each_train[0] == 1:\r\n left_counter_pos += 1\r\n else:\r\n left_counter_neg += 1\r\n else:\r\n right_data.append(each_train)\r\n if each_train[0] == 1:\r\n right_counter_pos += 1\r\n else:\r\n right_counter_neg += 1\r\n if left_counter_pos > left_counter_neg:\r\n m1 = \"positive\"\r\n else:\r\n m1 = \"negative\"\r\n if right_counter_pos > right_counter_neg:\r\n m2 = \"positive\"\r\n else:\r\n m2 = \"negative\"\r\n left_data = np.array(left_data)\r\n right_data = np.array(right_data)\r\n l = i + 1\r\n node.left = self.insert(vector, left_data, left_counter_pos, left_counter_neg, m1, l)\r\n r = i + 1\r\n node.right = self.insert(vector, right_data, right_counter_pos, right_counter_neg, m2, r)\r\n return node\r\n","sub_path":"Project 2/src/ID3.py","file_name":"ID3.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377593407","text":"from csv import writer\nfrom json import dumps\nfrom requests import post\nimport time\n\nheaders = {\"Authorization\": \"token \"}\n\nrepositoriesQuery = \"\"\"\nquery repositoriesQuery {\n search(type: REPOSITORY, first: 100, query: \"stars:>100 created:>=2016-01-01 language:python\"{AFTER}) {\n pageInfo {\n hasNextPage\n endCursor\n }\n nodes {\n ... on Repository {\n id\n nameWithOwner\n url\n stargazers {\n totalCount\n }\n issues {\n totalCount\n }\n }\n }\n }\n}\n\"\"\"\n\nissuesQuery = \"\"\"\nquery example {\n repository(owner: \"{OWNER}\", name: \"{NAME}\"){\n issues(first: 10, orderBy:{field: CREATED_AT, direction: ASC}{AFTER}){\n pageInfo{\n hasNextPage\n endCursor\n }\n nodes {\n id\n title\n createdAt\n closedAt\n closed\n }\n } \n }\n}\n\"\"\"\n\ndef runQuery(query):\n request = post(\n 'https://api.github.com/graphql', json={'query': query}, headers=headers\n )\n while (request.status_code == 502):\n time.sleep(2)\n request = post(\n 'https://api.github.com/graphql', json={'query': query}, headers=headers\n )\n if request.status_code == 200:\n return request.json()\n else:\n raise Exception(\"Query falhou! Codigo de retorno: {}. 
{}\".format(request.status_code, query))\n\ndef getAllRepositories(query):\n finalQuery = query.replace(\"{AFTER}\", \"\")\n result = runQuery(finalQuery)\n\n totalPages = 1\n hasNextPage = result[\"data\"][\"search\"][\"pageInfo\"][\"hasNextPage\"]\n currentEndCursor = result[\"data\"][\"search\"][\"pageInfo\"][\"endCursor\"]\n\n allResults = result[\"data\"][\"search\"][\"nodes\"]\n\n while hasNextPage and totalPages <= 10:\n finalQuery = query.replace(\"{AFTER}\", f', after: \"{currentEndCursor}\"')\n result = runQuery(finalQuery)\n\n totalPages += 1\n hasNextPage = result[\"data\"][\"search\"][\"pageInfo\"][\"hasNextPage\"]\n currentEndCursor = result[\"data\"][\"search\"][\"pageInfo\"][\"endCursor\"]\n \n allResults += result[\"data\"][\"search\"][\"nodes\"]\n\n writeCSV(\"repositories.csv\", allResults)\n\ndef getAllIssues(query):\n with open(\"repositories.csv\", \"r\", encoding=\"utf-8\") as f:\n lines = f.read()\n for line in lines.splitlines():\n line = line.split(\",\")\n\n nameWithOwner = line[1].split(\"/\")\n owner = nameWithOwner[0]\n name = nameWithOwner[1]\n\n idRepository = line[0]\n\n allResults = getRepositoryIssues(owner, name, query)\n\n for result in allResults:\n result[\"idRepository\"] = idRepository\n result[\"owner\"] = owner\n result[\"name\"] = name\n\n writeCSV(\"issues.csv\", allResults)\n\ndef getRepositoryIssues(owner, name, query):\n finalQuery = query.replace(\"{OWNER}\", owner).replace(\"{NAME}\", name).replace(\"{AFTER}\", \"\")\n result = runQuery(finalQuery)\n\n totalPages = 1\n currentEndCursor = result[\"data\"][\"repository\"][\"issues\"][\"pageInfo\"][\"endCursor\"]\n hasNextPage = result[\"data\"][\"repository\"][\"issues\"][\"pageInfo\"][\"hasNextPage\"]\n\n allResults = result[\"data\"][\"repository\"][\"issues\"][\"nodes\"]\n\n while hasNextPage and totalPages <= 10:\n finalQuery = query.replace(\"{OWNER}\", owner).replace(\"{NAME}\", name).replace(\"{AFTER}\", f', after: \"{currentEndCursor}\"')\n result = runQuery(finalQuery)\n\n totalPages += 1\n currentEndCursor = result[\"data\"][\"repository\"][\"issues\"][\"pageInfo\"][\"endCursor\"]\n hasNextPage = result[\"data\"][\"repository\"][\"issues\"][\"pageInfo\"][\"hasNextPage\"]\n\n allResults += result[\"data\"][\"repository\"][\"issues\"][\"nodes\"]\n\n return allResults \n\ndef writeCSV(file, allResults):\n with open(file, \"a\", newline = '', encoding=\"utf-8\") as csv_file:\n csv = writer(csv_file)\n for result in allResults:\n csv.writerow(result.values())\n\ndef main():\n getAllRepositories(repositoriesQuery)\n getAllIssues(issuesQuery)\n\nmain()\n","sub_path":"Lab03/GraphQL.py","file_name":"GraphQL.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296732648","text":"import mcpi.minecraft as minecraft # Load libraries\nfrom ISStreamer.Streamer import Streamer\nimport mcpi.block as block\nimport time, datetime, psutil\n\nfor pros in psutil.pids(): # Get the Linux process number for the Minecraft program\n if psutil.Process(pros).name() == 'minecraft-pi' and len(psutil.Process(pros).cmdline()) == 1:\n pm = psutil.Process(pros)\nstreamer=Streamer(bucket_name=\":mushroom: Terraforming\", bucket_key=\"XLKPE4TQSALA\", access_key= \"vEtDShUV70BYKU418rv20GwJcjyaPvhJ\")\nFree_account = True # If using a free IS account, set to True to limit data uploads & avoid exceeding monthly limit\n# Function to Upload various bits of data to IS\ndef upload_data_to_IS(speed,elapsed,blocks_processed, 
blocks_transformed,cpu,y,x,z,mem,pm,num_blocks):\n print('Uploading to Initial State')\n streamer.log(\":snail: Run Speed\",speed)\n streamer.log(\":jack_o_lantern: Run2 Time since last \"+ str(num_blocks) + \"blocks\",elapsed)\n streamer.log(\":volcano: Run2 Total Blocks\",blocks_processed)\n streamer.log(\":chocolate_bar:Run2 Blocks transformed\",blocks_transformed)\n streamer.log(\":up: CPU %\",cpu)\n streamer.log(\":arrow_down: Y\",y)\n streamer.log(\":arrow_right: X\",x)\n streamer.log(\":arrow_left: Z\",z)\n streamer.log(\":question: Memory used %\",mem.percent)\n streamer.log(\":question: Minecraft Process memory used %\",pm.memory_percent())\n\ntime.sleep(1)\nmc=minecraft.Minecraft.create() # Connect to Minecraft\nkeepblocks=[block.AIR.id,block.WATER.id,block.LAVA.id,block.SNOW.id,block.WATER_FLOWING.id,block.WATER_STATIONARY]\ncounter = 0 # A bunch of variables to keep track of how many blocks have been processed\nblocks_processed = 0\nblocks_transformed = 0\nblocks_since = 0\nthrottle = 5 # Use this when Free_account is True, to restrict amount of data uploaded\nnum_blocks = 1000 # How many blocks to transform before pausing to let Minecraft catch-up\nstart = time.time()\nfor x in range(-128,128): # the x-direction\n for y in range(-4,35): # the y-direction (up/down)\n for z in range(-128,128): # the z-direction\n print(x,y,z)\n test = mc.getBlock(x,y,z) # Read a block at x,y,z\n blocks_processed+=1\n blocks_since+=1\n if test not in keepblocks: # Don't transform these blocks (should always contain AIR)\n counter+=1\n if counter > num_blocks:\n blocks_transformed+=num_blocks\n counter = 0\n stop = time.time()\n elapsed = stop - start # How long since last group of blocks were processed?\n speed = blocks_since/elapsed # calculate speed\n cpu = psutil.cpu_percent() # Read CPU utilisation\n mem = psutil.virtual_memory() # read memory usage data\n if Free_account: # Only bother to throttle if using free IS account\n if throttle == 0:\n upload_data_to_IS(speed,elapsed,blocks_processed, blocks_transformed,cpu,y,x,z,mem,pm,num_blocks)\n throttle = 5\n else:\n throttle-=1\n print('reducing throttle')\n else:\n upload_data_to_IS(speed,elapsed,blocks_processed, blocks_transformed,cpu,y,x,z,mem,pm, num_blocks)\n time.sleep(5) # Pause to allow Minecraft catch-up\n start = time.time()\n blocks_since=0\n mc.setBlock(x,y,z,block.DIAMOND_ORE.id) # <- set this: the block to which you want other transformed\n print('Changing Block: ' + str(test) + ' (counter = ' + str(counter) + ')')\n time.sleep(0.1)\n else:\n print('Not changing Block: ' + str(test) + ' (counter = ' + str(counter) + ')')\n","sub_path":"python/2015-12-21_through_2016-10-29_The_first_projects/is_terraforming.py","file_name":"is_terraforming.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106736276","text":"import glob\nimport sys\nfrom metadata import *\nfrom utils import *\nfrom sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering, DBSCAN\nimport argparse\nimport pickle\nimport numpy, scipy.io\nimport torch\nimport numpy as np\nimport joblib\nimport pywt\nfrom overall_event_get_input import *\nimport joblib\nfrom Atomic_node_only_lstm import Atomic_node_only_lstm\nimport copy\nimport time\nimport threading\nimport os, os.path\nfrom metric import *\n# from BeamSearchClass import *\nfrom multiprocessing import Process\n#import torch.multiprocessing\nfrom Atomic_node_only_lstm_517 import 
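The is_terraforming.py record above rate-limits Initial State uploads by counting a throttle variable down from 5 when Free_account is set, so only one batch in six is uploaded. The same gating as a standalone sketch (class name is illustrative):

class UploadThrottle:
    # Fires once every (every_n + 1) calls, mirroring the countdown above.
    def __init__(self, every_n=5):
        self.every_n = every_n
        self._count = every_n

    def should_upload(self):
        if self._count == 0:
            self._count = self.every_n
            return True
        self._count -= 1
        return False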
Atomic_node_only_lstm_first_view\n# torch.multiprocessing.set_start_method('spawn', force='True')\nfrom joblib import Parallel, delayed\nfrom os import listdir\nfrom para_bank import generate_para_bank\nfrom itertools import product\n\nclass BeamSearch(object):\n\n # ================\n # init functions\n # ================\n def __init__(self, args, event_net, atomic_net):\n\n self.args = args\n self.epsilon = 0.000001\n self.init_cps_all = pickle.load(open(args.init_cps, 'rb'), encoding='latin1')\n\n self.event_net=event_net\n self.atomic_net=atomic_net\n\n self.event_trans_table = pickle.load(open(os.path.join(args.stat_path, 'event_trans_normalized.p'), 'rb'), encoding='latin1')\n\n def init(self, clip):\n\n self.clip = clip\n self.clip_length = clips_len[clip]\n\n # initial cps\n self.init_cps_T = self.init_cps_all[0][self.clip]\n self.init_cps_B = self.init_cps_all[1][self.clip]\n # add the final frame\n self.init_cps_T.append(self.clip_length)\n self.init_cps_T=list(np.unique(self.init_cps_T))\n self.init_cps_B.append(self.clip_length)\n self.init_cps_B = list(np.unique(self.init_cps_B))\n\n with open(self.args.tracker_bbox + clip, 'rb') as f:\n self.person_tracker_bbox = pickle.load(f, encoding='latin1')\n with open(self.args.battery_bbox + clip, 'rb') as f:\n self.person_battery_bbox = pickle.load(f, encoding='latin1')\n\n # attmat\n with open(self.args.attmat_path + clip, 'rb') as f:\n self.attmat_obj = pickle.load(f, encoding='latin1')\n\n with open(self.args.cate_path + clip, 'rb') as f:\n self.category = pickle.load(f, encoding='latin1')\n\n # feature\n with open(os.path.join(self.args.data_path2, 'feature_single', clip), 'rb') as f:\n self.feature_single = pickle.load(f, encoding='latin1')\n\n if tracker_skeID[clip.split('.')[0]] == 'skele1.p':\n self.T_skeID = 1\n self.B_skeID = 2\n elif tracker_skeID[clip.split('.')[0]] == 'skele2.p':\n self.T_skeID = 2\n self.B_skeID = 1\n\n # init root node and tree for each new clip\n self.root_node = {'T': {'cp': [0], 'event': [], 'mind': [], 'event_vec':[]}, 'B': {'cp': [0], 'event': [], 'mind': [], 'event_vec':[]},\n 'score':{'prior_aggr':{'cnt_joint':0., 'cnt_single':0.},\n 'prior_event':{'e_T':0., 'cnt_T':0., 'e_S':0., 'cnt_S':0., 'T_last_type':None, 'B_last_type':None},\n 'prior_fluent':0.,\n 'like_P':{'T_D_i':0., 'T_CNT_i':0., 'B_D_i':0., 'B_CNT_i':0., 'T_D_t':0., 'T_CNT_t':0., 'B_D_t':0., 'B_CNT_t':0., 'last_T_hist':None, 'last_B_hist':None, 'D_s':0., 'CNT_s':0.},\n 'like_E':{'e_E':0., 'CNT':0.},\n 'like_M':0,\n 'T_update_flag':0,\n 'B_update_flag':0\n }\n } # flag indicates whether it's updated.\n self.Tree = {'nodes': [self.root_node], 'level': 0}\n\n self.check_event_flag=True\n\n # ================\n # utils functions\n # ================\n def seg2frame(self, node):\n segs = node['cp']\n events = node['event']\n #assert (len(segs) - 1) == len(events)\n\n frame_labels = np.zeros((segs[-1]))\n for seg_id, seg in enumerate(segs[:-1]):\n event = events[seg_id][0]\n start = seg\n end = segs[seg_id + 1]\n frame_labels[start: end] = event\n\n return frame_labels\n\n def check_event(self, node):\n # if not valid, return True\n tracker_frames = self.seg2frame(node['T'])\n battery_frames = self.seg2frame(node['B'])\n overlap_id = min(tracker_frames.shape[0], battery_frames.shape[0])\n\n #compare = np.logical_xor(tracker_frames[:overlap_id], battery_frames[:overlap_id])\n compare=np.abs(tracker_frames[:overlap_id]-battery_frames[:overlap_id])\n return np.sum(compare) > 0\n\n def cps2segs(self, cps):\n segs = []\n if len(cps) >= 2:\n 
cp_l = cps[0]\n segs = []\n for idx in range(1, len(cps)):\n cp_r = cps[idx]\n segs.append([cp_l, cp_r])\n cp_l = cp_r\n return segs\n\n def combcps(self, cps1, cps2):\n comb = []\n comb.extend(cps1)\n comb.extend(cps2)\n comb = list(np.sort(np.unique(np.array(comb))))\n\n return comb\n\n def seg2cp(self, segs):\n cp = []\n for seg in segs:\n cp.append(seg[0])\n return cp\n\n def ori_seg_id(self, ori_cps, seg):\n if len(ori_cps) > 1:\n for idx in range(1, len(ori_cps)):\n if seg[0] >= ori_cps[idx - 1] and seg[1] <= ori_cps[idx]:\n return idx - 1\n return None\n\n def freq2hist(self, freq_feature):\n seg_features = np.empty((1, 0))\n for dim_id in range(freq_feature.shape[1]):\n video_vec = freq_feature[:, dim_id] / 4\n hist, bin_edges = np.histogram(video_vec, bins=self.args.hist_bin, density=True)\n seg_features = np.hstack([seg_features, hist.reshape((1, -1))])\n return seg_features\n\n def temporal2freq(self, feature):\n coeffs = pywt.dwt(feature, 'sym2')\n cA1, _ = coeffs\n new_feature = self.freq2hist(cA1)\n return new_feature\n\n def find_sep_gt(self, tracker_gt_seg, segs_T):\n curr_gt_seg = []\n start_frame = segs_T[0][0]\n end_frame = segs_T[-1][1]\n for seg in tracker_gt_seg:\n max_start = max(seg[0], start_frame)\n min_end = min(seg[1], end_frame)\n if max_start < min_end:\n curr_gt_seg.append(seg)\n return curr_gt_seg\n\n # ================\n # score functions\n # ================\n def prior_energy_aggr(self, node):\n\n N_p = len(node['T']['cp']) / float(node['T']['cp'][-1]) + len(node['B']['cp']) / float(node['B']['cp'][-1])\n\n e_aggr = self.args.lambda_1 * N_p\n\n return e_aggr, node\n\n def prior_energy_event(self, node):\n e_T = node['score']['prior_event']['e_T']\n cnt_T = node['score']['prior_event']['cnt_T']\n T_last_type=node['score']['prior_event']['T_last_type']\n B_last_type=node['score']['prior_event']['B_last_type']\n\n # temporal transition\n T_new_type = node['T']['event'][-1][0]\n if T_last_type is not None:\n trans_key = (T_last_type, T_new_type)\n e_T += self.event_trans_table[trans_key]\n cnt_T += 1\n T_last_type=T_new_type\n\n B_new_type = node['B']['event'][-1][0]\n if B_last_type is not None:\n trans_key = (B_last_type, B_new_type)\n e_T += self.event_trans_table[trans_key]\n cnt_T += 1\n B_last_type=B_new_type\n\n node['score']['prior_event']['e_T']=e_T\n node['score']['prior_event']['cnt_T']=cnt_T\n node['score']['prior_event']['T_last_type']=T_last_type\n node['score']['prior_event']['B_last_type']=B_last_type\n\n e_T = e_T / (cnt_T + self.epsilon)\n\n # spatial concurrency\n e_S = node['score']['prior_event']['e_S']\n cnt_S = node['score']['prior_event']['cnt_S']\n\n segs_T = self.cps2segs(node['T']['cp'])\n segs_B = self.cps2segs(node['B']['cp'])\n\n if node['score']['T_update_flag']==1:\n for idx2 in range(len(segs_B)):\n if segs_T[-1][0] >= segs_B[idx2][1]:\n continue\n elif segs_T[-1][1] <= segs_B[idx2][0]:\n break\n else:\n event_T = node['T']['event'][-1][0]\n event_B = node['B']['event'][idx2][0]\n if event_T == event_B:\n e_S += 1\n cnt_S += 1\n else:\n e_S += 0\n cnt_S += 1\n\n if node['score']['B_update_flag']==1:\n for idx1 in range(len(segs_T)):\n if segs_T[idx1][0] >= segs_B[-1][1]:\n break\n elif segs_T[idx1][1] <= segs_B[-1][0]:\n continue\n else:\n event_T = node['T']['event'][idx1][0]\n event_B = node['B']['event'][-1][0]\n if event_T == event_B:\n e_S += 1\n cnt_S += 1\n else:\n e_S += 0\n cnt_S += 1\n if node['score']['T_update_flag']==1 and node['score']['B_update_flag']==1:\n event_T = node['T']['event'][-1][0]\n event_B = 
node['B']['event'][-1][0]\n if event_T == event_B:\n e_S -= 1\n cnt_S -= 1\n else:\n e_S -= 0\n cnt_S-= 1\n\n node['score']['prior_event']['e_S']=e_S\n node['score']['prior_event']['cnt_S']=cnt_S\n\n e_S = e_S / (cnt_S + self.epsilon)\n\n e_event = -self.args.lambda_4 * (e_T + self.epsilon) - self.args.lambda_5 * (e_S + self.epsilon)\n\n return e_event, node\n\n def likelihood_energy_P(self, node):\n\n # tracker\n # inner particle distance\n T_D_i = node['score']['like_P']['T_D_i']\n T_CNT_i = node['score']['like_P']['T_CNT_i']\n\n seg = [node['T']['cp'][-2], node['T']['cp'][-1]]\n feature = self.feature_single[self.T_skeID][seg[0]:seg[1], :]\n sum_tmp = 0\n for idx in range(1, feature.shape[0]):\n sum_tmp += np.linalg.norm(feature[idx - 1] - feature[idx])\n sum_tmp = sum_tmp / float(feature.shape[0])\n T_D_i += sum_tmp\n T_CNT_i += 1\n\n node['score']['like_P']['T_D_i']= T_D_i\n node['score']['like_P']['T_CNT_i']= T_CNT_i\n\n # inter particle distance - T\n T_D_t = node['score']['like_P']['T_D_t']\n T_CNT_t = node['score']['like_P']['T_CNT_t']\n last_T_hist= node['score']['like_P']['last_T_hist']\n T_hist = self.temporal2freq(feature)\n if last_T_hist is not None:\n T_D_t += np.linalg.norm(last_T_hist - T_hist)/312.\n T_CNT_t += 1\n\n node['score']['like_P']['T_D_t']=T_D_t\n node['score']['like_P']['T_CNT_t']=T_CNT_t\n node['score']['like_P']['last_T_hist']=T_hist\n\n # battery\n # inner particle distance\n B_D_i = node['score']['like_P']['B_D_i']\n B_CNT_i = node['score']['like_P']['B_CNT_i']\n seg = [node['B']['cp'][-2], node['B']['cp'][-1]]\n feature = self.feature_single[self.B_skeID][seg[0]:seg[1], :]\n sum_tmp = 0\n for idx in range(1, feature.shape[0]):\n sum_tmp += np.linalg.norm(feature[idx - 1] - feature[idx])\n sum_tmp = sum_tmp / float(feature.shape[0])\n B_D_i += sum_tmp\n B_CNT_i += 1\n\n node['score']['like_P']['B_D_i']= B_D_i\n node['score']['like_P']['B_CNT_i']= B_CNT_i\n\n # inter particle distance - T\n B_D_t = node['score']['like_P']['B_D_t']\n B_CNT_t = node['score']['like_P']['B_CNT_t']\n last_B_hist= node['score']['like_P']['last_B_hist']\n B_hist = self.temporal2freq(feature)\n if last_B_hist is not None:\n B_D_t += np.linalg.norm(last_B_hist - B_hist)/312.\n B_CNT_t += 1\n\n node['score']['like_P']['B_D_t']=B_D_t\n node['score']['like_P']['B_CNT_t']=B_CNT_t\n node['score']['like_P']['last_B_hist']=B_hist\n\n e_P = self.args.beta_1 * (T_D_i / (T_CNT_i + self.epsilon) + B_D_i / (B_CNT_i + self.epsilon)) - self.args.beta_3 * (T_D_t / (T_CNT_t + self.epsilon) + B_D_t / (B_CNT_t + self.epsilon))\n\n return e_P, node\n\n def likelihood_energy_E(self, node):\n\n e_E = node['score']['like_E']['e_E']\n CNT = node['score']['like_E']['CNT']\n\n if node['score']['T_update_flag'] == 1:\n e_E += node['T']['event'][-1][1]\n CNT += 1\n if node['score']['B_update_flag'] == 1:\n e_E += node['B']['event'][-1][1]\n CNT += 1\n\n node['score']['like_E']['e_E'] = e_E\n node['score']['like_E']['CNT'] = CNT\n\n e_E = e_E / CNT\n\n energy_E = -self.args.gamma_1 * e_E\n\n return energy_E, node\n\n\n # ================\n # tree functions\n # ================\n def tree_prune(self, all_possible_path):\n '''\n input:\n all possible paths in this level\n :return:\n top N paths\n '''\n\n score_all = []\n node_ids = []\n all_possible_nodes_new=[]\n\n if self.check_event_flag:\n for idx, node in enumerate(all_possible_path):\n # calculate score for the current node/path\n if not self.check_event(node): # note the if here\n\n e_aggr, node= self.prior_energy_aggr(node)\n 
e_event,node=self.prior_energy_event(node)\n e_P, node=self.likelihood_energy_P(node)\n e_E, node=self.likelihood_energy_E(node)\n\n node_score = -e_aggr-e_event-e_P-e_E\n score_all.append(node_score)\n node_ids.append(idx)\n\n all_possible_nodes_new.append(node)\n\n else:\n\n for idx, node in enumerate(all_possible_path):\n # calculate score for the current node/path\n e_aggr, node = self.prior_energy_aggr(node)\n e_event, node = self.prior_energy_event(node)\n e_P, node = self.likelihood_energy_P(node)\n e_E, node = self.likelihood_energy_E(node)\n\n node_score = -e_aggr - e_event - e_P - e_E\n score_all.append(node_score)\n node_ids.append(idx)\n all_possible_nodes_new.append(node)\n\n ordered_index = list(np.argsort(np.array(score_all))[::-1])\n selected_index = ordered_index[:self.args.topN]\n node_ids = np.array(node_ids)\n top_node_ids = node_ids[selected_index]\n\n # assert len(top_node_ids)>0, 'no valid top nodes!'\n if len(top_node_ids)>0:\n self.Tree['nodes'] = []\n for node_id in top_node_ids:\n node = all_possible_nodes_new[node_id]\n self.Tree['nodes'].append(node)\n return True\n else:\n\n self.check_event_flag=False\n print('err! no valid top nodes! first time')\n score_all = []\n node_ids = []\n all_possible_nodes_new = []\n for idx, node in enumerate(all_possible_path):\n # calculate score for the current node/path\n e_aggr, node = self.prior_energy_aggr(node)\n e_event, node = self.prior_energy_event(node)\n e_P, node = self.likelihood_energy_P(node)\n e_E, node = self.likelihood_energy_E(node)\n\n node_score = -e_aggr - e_event - e_P - e_E\n score_all.append(node_score)\n node_ids.append(idx)\n\n all_possible_nodes_new.append(node)\n\n ordered_index = list(np.argsort(np.array(score_all))[::-1])\n selected_index = ordered_index[:self.args.topN]\n node_ids = np.array(node_ids)\n top_node_ids = node_ids[selected_index]\n\n if len(top_node_ids)>0:\n self.Tree['nodes'] = []\n for node_id in top_node_ids:\n node = all_possible_nodes_new[node_id]\n self.Tree['nodes'].append(node)\n return True\n else:\n print('err! no valid top nodes! 
second time')\n return False\n\n def node_expand(self, parent_node):\n '''\n input:\n parent checkpoint node\n :return:\n all possible children nodes\n '''\n t_node_id = parent_node['T']['cp'][-1]\n possible_t_cp =[]\n for i in range(len(self.init_cps_T)):\n if self.init_cps_T[i] > t_node_id:\n possible_t_cp.append(self.init_cps_T[i])\n\n b_node_id = parent_node['B']['cp'][-1]\n possible_b_cp = []\n for j in range(len(self.init_cps_B)):\n if self.init_cps_B[j] > b_node_id:\n possible_b_cp.append(self.init_cps_B[j])\n\n return possible_t_cp, possible_b_cp\n\n\n def tree_grow(self):\n #print(\"I am process {}-- into tree grow\".format(process_i))\n '''\n input:\n current top N possible path\n :return:\n new top N possible path\n '''\n all_possible_nodes = []\n start_time = time.time()\n for idx, parent_node in enumerate(self.Tree['nodes']):\n # find possible child nodes of the current node\n possible_t_cp, possible_b_cp = self.node_expand(parent_node)\n search_N_cp_T = min(len(possible_t_cp), self.args.search_N_cp)\n search_N_cp_B = min(len(possible_b_cp), self.args.search_N_cp)\n\n # all possible paths\n if len(possible_t_cp) >=1 and len(possible_b_cp) >=1:\n for cp_t_id in possible_t_cp[:search_N_cp_T]:\n for cp_b_id in possible_b_cp[:search_N_cp_B]:\n\n combinations = list(product([0, 1, 2], repeat=2))\n #print(combinations)\n for combination in combinations:\n new_node = copy.deepcopy(parent_node)\n new_node['T']['cp'].append(cp_t_id)\n new_node['B']['cp'].append(cp_b_id)\n\n start = parent_node['T']['cp'][-1]\n end = cp_t_id\n with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'tracker',\n '{}_{}.p'.format(start, end)), 'rb') as f:\n outputs = pickle.load(f)\n new_node['T']['event'].append([combination[0], outputs[0][combination[0]]])\n new_node['T']['event_vec'].append(outputs[0])\n\n start = parent_node['B']['cp'][-1]\n end = cp_b_id\n with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'battery',\n '{}_{}.p'.format(start, end)), 'rb') as f:\n outputs = pickle.load(f)\n new_node['B']['event'].append([combination[1], outputs[0][combination[1]]])\n new_node['B']['event_vec'].append(outputs[0])\n\n new_node['score']['T_update_flag'] = 1\n new_node['score']['B_update_flag'] = 1\n\n all_possible_nodes.append(new_node)\n\n elif len(possible_t_cp) == 0 and len(possible_b_cp) >=1:\n for cp_b_id in possible_b_cp[:search_N_cp_B]:\n\n new_node1, new_node2, new_node3 = copy.deepcopy(parent_node), copy.deepcopy(\n parent_node), copy.deepcopy(parent_node)\n\n # add new cp\n new_node1['B']['cp'].append(cp_b_id)\n new_node2['B']['cp'].append(cp_b_id)\n new_node3['B']['cp'].append(cp_b_id)\n\n # predict event for current seg\n # battery\n start = parent_node['B']['cp'][-1]\n end = cp_b_id\n with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'battery',\n '{}_{}.p'.format(start, end)), 'rb') as f:\n outputs = pickle.load(f)\n new_node1['B']['event'].append([0, outputs[0][0]])\n new_node1['B']['event_vec'].append(outputs[0])\n new_node2['B']['event'].append([1, outputs[0][1]])\n new_node2['B']['event_vec'].append(outputs[0])\n new_node3['B']['event'].append([2, outputs[0][2]])\n new_node3['B']['event_vec'].append(outputs[0])\n\n new_node1['score']['T_update_flag'] = 0\n new_node1['score']['B_update_flag'] = 1\n new_node2['score']['T_update_flag'] = 0\n new_node2['score']['B_update_flag'] = 1\n new_node3['score']['T_update_flag'] = 0\n new_node3['score']['B_update_flag'] = 1\n all_possible_nodes.append(new_node1)\n 
all_possible_nodes.append(new_node2)\n all_possible_nodes.append(new_node3)\n elif len(possible_t_cp) >= 1 and len(possible_b_cp) == 0:\n\n for cp_t_id in possible_t_cp[:search_N_cp_T]:\n\n new_node1, new_node2, new_node3 = copy.deepcopy(parent_node), copy.deepcopy(\n parent_node), copy.deepcopy(parent_node)\n\n # add new cp\n new_node1['T']['cp'].append(cp_t_id)\n new_node2['T']['cp'].append(cp_t_id)\n new_node3['T']['cp'].append(cp_t_id)\n\n # predict event for current seg\n # battery\n start = parent_node['T']['cp'][-1]\n end = cp_t_id\n\n with open(op.join(self.args.save_event_score, self.clip.split('.')[0], 'tracker',\n '{}_{}.p'.format(start, end)), 'rb') as f:\n outputs = pickle.load(f)\n new_node1['T']['event'].append([0, outputs[0][0]])\n new_node1['T']['event_vec'].append(outputs[0])\n new_node2['T']['event'].append([1, outputs[0][1]])\n new_node2['T']['event_vec'].append(outputs[0])\n new_node3['T']['event'].append([2, outputs[0][2]])\n new_node3['T']['event_vec'].append(outputs[0])\n\n new_node1['score']['T_update_flag'] = 1\n new_node1['score']['B_update_flag'] = 0\n new_node2['score']['T_update_flag'] = 1\n new_node2['score']['B_update_flag'] = 0\n new_node3['score']['T_update_flag'] = 1\n new_node3['score']['B_update_flag'] = 0\n all_possible_nodes.append(new_node1)\n all_possible_nodes.append(new_node2)\n all_possible_nodes.append(new_node3)\n\n\n if len(all_possible_nodes) == 0:\n #print(\"Beam Search comes to the end for clip {}. Total level is {}\".format(self.clip, self.Tree['level']))\n return self.Tree['nodes'][0]\n else:\n flag=self.tree_prune(all_possible_nodes)\n self.Tree['level'] += 1\n if flag==True:\n return None\n elif flag==False:\n return self.Tree['nodes'][0]\n\ndef test_best_Tree(atomic_net, event_net, clip_idx, clip, args):\n\n pid = threading.get_ident()\n print('pid: {} clip {}'.format(pid, clip))\n\n beam_search = BeamSearch(args, event_net, atomic_net)\n\n beam_search.init(clip)\n while True:\n Tree_best=beam_search.tree_grow()\n if Tree_best is not None:\n break\n\n with open(op.join(args.save_path, clip), 'wb') as f:\n pickle.dump(Tree_best, f, protocol=2)\n\ndef parse_arguments():\n\n parser=argparse.ArgumentParser(description='')\n # path\n home_path='/home/lfan/Dropbox/Projects/NIPS20/'\n home_path2='/media/lfan/HDD/NIPS20/'\n parser.add_argument('--project-path',default = home_path)\n parser.add_argument('--project-path2', default=home_path2)\n parser.add_argument('--data-path', default=home_path+'data/')\n parser.add_argument('--data-path2', default=home_path2 + 'data/')\n parser.add_argument('--img-path', default=home_path+'annotations/')\n parser.add_argument('--save-path', default='')\n parser.add_argument('--init-cps', default='/media/lfan/HDD/NIPS20/data/init_cps/CPS_NEW.p')\n parser.add_argument('--stat-path', default=home_path+'data/stat/')\n parser.add_argument('--attmat-path', default=home_path+'data/record_attention_matrix/')\n parser.add_argument('--cate-path', default=home_path2+'data/track_cate/')\n parser.add_argument('--tracker-bbox', default=home_path2+'data/tracker_record_bbox/')\n parser.add_argument('--battery-bbox', default=home_path2+'data/record_bbox/')\n parser.add_argument('--obj-bbox', default=home_path2+'data/post_neighbor_smooth_newseq/')\n parser.add_argument('--ednet-path', default=home_path+'model/ednet_tuned_best.pth')\n parser.add_argument('--atomic-path', default=home_path+'model/atomic_best.pth')\n parser.add_argument('--seg-label', default=home_path + 'data/segment_labels/')\n 
parser.add_argument('--feature-single', default=home_path2 + 'data/feature_single/')\n parser.add_argument('--save-event-score', default='/media/lfan/HDD/NIPS20/data/EVENT_SCORE/')\n\n # parameter\n parser.add_argument('--lambda-1', default=5)\n parser.add_argument('--lambda-4', default=10)\n parser.add_argument('--lambda-5', default=20)\n parser.add_argument('--beta-1', default=10)\n parser.add_argument('--beta-3', default=0.5)\n parser.add_argument('--gamma-1', default=15)\n parser.add_argument('--search-N-cp', default=5)\n parser.add_argument('--topN', default=3)\n\n parser.add_argument('--hist-bin', default=10)\n parser.add_argument('--seg-alpha', default=50)\n\n # others\n parser.add_argument('--cuda', default=False)\n parser.add_argument('--ip', default='192.168.1.17')\n parser.add_argument('--port', default=1234)\n parser.add_argument('--resume',default=False)\n parser.add_argument('--test-func-batch-size', default=16)\n parser.add_argument('--gt-mode', default=True)\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n\n args=parse_arguments()\n args.save_path=op.join(args.project_path2, 'BestTree_ours_0531_all_clips')\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n atomic_event_net = Atomic_node_only_lstm_first_view()\n load_best_checkpoint(atomic_event_net, path=args.atomic_path)\n if args.cuda and torch.cuda.is_available():\n atomic_event_net.cuda()\n atomic_event_net.eval()\n\n event_net=EDNet()\n event_net.load_state_dict(torch.load(args.ednet_path))\n if args.cuda and torch.cuda.is_available():\n event_net.cuda()\n event_net.eval()\n\n args.topN = 3\n args.lambda_1 = 3\n args.lambda_4 = 20\n args.lambda_5 = 30\n args.beta_1 = 13\n args.beta_3 = 0.7\n args.gamma_1 = 30\n args.search_N_cp = 5\n\n Parallel(n_jobs=-1)(delayed(test_best_Tree)(atomic_event_net, event_net, clip_idx, clip, args) for clip_idx, clip in enumerate(clips_all))\n\n","sub_path":"src/beam_search_ours.py","file_name":"beam_search_ours.py","file_ext":"py","file_size_in_byte":27650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"344410758","text":"#!/usr/bin/env python\n\n\"\"\"Tests for `vcf_consensus_builder` package.\"\"\"\n\nimport re\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\nfrom Bio import SeqIO\nfrom click.testing import CliRunner\nfrom vcf_consensus_builder.vcf_consensus_builder_core import consensus\n\nimport cli\nimport unittest\n\nVCF = 'data/test.vcf'\nREF_FASTA = 'data/ref.fa'\nDEPTHS = 'data/test-depths.tsv'\nOUTPUT_FASTA = 'out.fa'\n\n\nclass TestBigBangCLI(unittest.TestCase):\n def test_command_line_interface_help(self):\n \"\"\"Test the CLI.\"\"\"\n runner = CliRunner()\n help_result = runner.invoke(cli.main, ['--help'])\n assert help_result.exit_code == 0\n assert re.search(r'--help\\s+Show this message and exit.', help_result.output, flags=re.DOTALL) is not None\n\n def test_command_line_interface_vcf_and_ref(self):\n # Test replacing multi-char deletion and SNPs and setting sample name via command-line\n runner = CliRunner()\n with TemporaryDirectory(prefix='vcf_consensus_builder', dir='/tmp') as tempdir:\n temppath = Path(tempdir)\n full_fasta_output = temppath / OUTPUT_FASTA\n consensus(REF_FASTA, VCF, DEPTHS, full_fasta_output)\n\n records = list(SeqIO.parse(full_fasta_output, 'fasta'))\n assert str(records[0].seq) == 'NACCGTANACAATAN--'\n assert str(records[1].seq) == 'TATACACATCCACGGC-N-N-N-N-N--T'\n\n def test_command_line_interface_no_coverage_threshold(self):\n # 
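Each call to tree_prune in the beam-search record scores every expanded path with the four energy terms (aggregation and event priors, particle and event likelihoods) and keeps the args.topN highest-scoring nodes. The generic prune step, sketched with a stand-in score_fn that is not part of the record:

import numpy as np

def prune_beam(candidates, score_fn, top_n):
    # Score all candidate paths and keep the top_n with the highest score.
    scores = np.array([score_fn(c) for c in candidates])
    order = np.argsort(scores)[::-1]        # descending by score
    return [candidates[i] for i in order[:top_n]]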
Test replacing multi-char deletion and SNPs and setting sample name via command-line\n runner = CliRunner()\n\n # Test changing no coverage threshold\n with TemporaryDirectory(prefix='vcf_consensus_builder', dir='/tmp') as tempdir:\n temppath = Path(tempdir)\n full_fasta_output = temppath / OUTPUT_FASTA\n consensus(REF_FASTA, VCF, DEPTHS, full_fasta_output, no_coverage=5)\n records = list(SeqIO.parse(full_fasta_output, 'fasta'))\n assert str(records[0].seq) == '-ACCGTA-ACAAT----', 'Positions <= 5X coverage must be replaced with \"-\"'\n assert str(records[1].seq) == 'TATACACATCC------------------', 'Positions <= 5X coverage must be replaced with \"-\"'\n\n def test_command_line_interface_low_and_no_coverage_threshold_with_other_chars(self):\n # Test replacing low and no coverage characters with other characters than default N and - respectively\n runner = CliRunner()\n\n with TemporaryDirectory(prefix='vcf_consensus_builder', dir='/tmp') as tempdir:\n temppath = Path(tempdir)\n full_fasta_output = temppath / OUTPUT_FASTA\n consensus(REF_FASTA, VCF, DEPTHS, full_fasta_output, no_cov_char=\"=\", low_cov_char=\"@\")\n records = list(SeqIO.parse(full_fasta_output, 'fasta'))\n assert str(records[0].seq) == '@ACCGTA@ACAATA@==', \\\n 'No coverage positions must be replaced with \"=\". Low coverage (<5X) positions must be replaced with \"@\".'\n assert str(records[1].seq) == 'TATACACATCCACGGC=@=@=@=@=@==T', \\\n 'No coverage positions must be replaced with \"=\". Low coverage (<5X) positions must be replaced with \"@\".'\n\n\n\n","sub_path":"tests/test_big_bang_tests.py","file_name":"test_big_bang_tests.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654167817","text":"# В массиве найти максимальный отрицательный элемент. Вывести на\n# экран его значение и позицию в массиве.\n\nimport random\n\narr = [random.randint(-99, 99) for _ in range(100)]\n# print(arr)\n\nmin_index = 0\n\nfor i in arr:\n if arr[min_index] > i:\n min_index = arr.index(i)\n print(min_index)\n\nprint(f'Минимальный отрицательный элемент: {arr[min_index]}.Находится на позиции {min_index}')\n","sub_path":"Lesson_3/lesson-03_5.py","file_name":"lesson-03_5.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"318294618","text":"\r\nimport sys\r\nN = int(input())\r\nA = list(map(int,sys.stdin.readline().split()))\r\nM = int(input())\r\nB = list(map(int, sys.stdin.readline().split()))\r\n\r\ndef binary_search(arr, left, right, target):\r\n\r\n while left <= right:\r\n mid = (left + right) // 2\r\n\r\n if arr[mid] == target:\r\n return mid\r\n elif arr[mid] < target:\r\n left = mid + 1\r\n elif arr[mid] > target:\r\n right = mid - 1\r\n return -1\r\n\r\n\r\nA.sort()\r\nfor i in B:\r\n res = binary_search(A, 0, len(A)-1, i)\r\n print(1 if res > -1 else 0)\r\n\r\n","sub_path":"백준/Silver/1920. 
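The lesson-03_5.py record above asks (translating its Russian comment) for the maximum negative element and its position, but the loop actually tracks the overall minimum, prints on every update, and arr.index(i) returns the first duplicate's index. A corrected sketch of the stated task:

import random

arr = [random.randint(-99, 99) for _ in range(100)]

max_neg_index = -1
for idx, val in enumerate(arr):
    # keep the largest value that is still below zero
    if val < 0 and (max_neg_index == -1 or val > arr[max_neg_index]):
        max_neg_index = idx

if max_neg_index >= 0:
    print(f'Maximum negative element: {arr[max_neg_index]}, at position {max_neg_index}')
else:
    print('No negative elements in the array')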
수 찾기/수 찾기.py","file_name":"수 찾기.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"128631457","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom ..registry import LOSSES\nfrom .base import BaseWeightedLoss\n\n\n@LOSSES.register_module()\nclass TotalVarianceLoss(BaseWeightedLoss):\n def __init__(self, spatial_kernels, temporal_kernels, num_channels,\n hard_values=False, limits=None, threshold=None, **kwargs):\n super(TotalVarianceLoss, self).__init__(**kwargs)\n\n self.num_channels = num_channels\n self.padding = (temporal_kernels - 1) // 2, (spatial_kernels - 1) // 2, (spatial_kernels - 1) // 2\n\n weights = np.ones([num_channels, 1, temporal_kernels, spatial_kernels, spatial_kernels], dtype=np.float32)\n weights /= temporal_kernels * spatial_kernels * spatial_kernels\n self.register_buffer('weights', torch.from_numpy(weights))\n\n self.hard_values = hard_values\n self.limits = limits\n assert len(self.limits) == 2\n assert self.limits[0] < self.limits[1]\n self.threshold = threshold\n\n def _forward(self, values):\n soft_values = F.conv3d(values, self.weights, None, 1, self.padding, 1, self.num_channels)\n\n if self.hard_values:\n trg_values = torch.where(soft_values < self.threshold,\n torch.full_like(soft_values, self.limits[0]),\n torch.full_like(soft_values, self.limits[1]))\n else:\n trg_values = soft_values\n\n losses = torch.abs(values - trg_values)\n out = losses.mean()\n\n return out\n","sub_path":"mmaction/models/losses/total_variance_loss.py","file_name":"total_variance_loss.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"651214162","text":"\n#def quicksort(A,p=0,r=-1)\ndef quicksort(A):\n #if r==-1:r=len(A)-1\n def real_quicksort(A, p, r):\n def partition(A, p, r):\n placeholder = \"~PH~\"\n compare = A[r]\n inserts = 0\n # partition\n for j in range(p, r):\n if A[j] >= compare:\n inserts+=1\n A.insert(r+inserts, A[j])\n A[j] = placeholder\n # PH removen\n for j in range(0, inserts):\n A.remove(placeholder)\n \n # das Splitelement hat sich genau um die Anzahl der Inserts nach links verschoben\n return (r-inserts)\n\n if p\"]=len(word2id)\n from collections import Counter\n for sentence,label_sentence in zip(sentence_list,label_list):\n for word in sentence:\n all_words.append(word)\n for label in label_sentence:\n label_set.add(label)\n label_=list(label_set)\n for label in label_:\n label2id[label]=len(label2id)\n counter=Counter(all_words)\n import operator\n sorted_list=sorted(counter.items(),key=operator.itemgetter(1),reverse=True)\n for word,freq in sorted_list:\n word2id[word]=len(word2id)\n word2id[\"\"]=len(word2id)\n \n parameter=(word2id,label2id)\n with open(\"./word_label2id.pkl\",\"wb\") as f:\n pickle.dump(parameter,f)\n\n#get_word_label2id(data,target)\n#sentence_list,label_list=data,target\nwith open(\"./word_label2id.pkl\",\"rb\") as f:\n word2id,label2id=pickle.load(f)\n\n#def sentence_to_id(sentence_list,label_list):\n# id_sentence_list=[]\n# id_label_list=[]\n# for sentence,label_sentence in zip(sentence_list,label_list):\n# assert len(sentence)==len(label_sentence)\n# id_sentence_list.append([word2id[word] for word in sentence])\n# id_label_list.append([label2id[label] for label in label_sentence])\n# return 
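The quicksort record above partitions by appending copies of large elements after the pivot and back-filling "~PH~" placeholder strings, which costs quadratic list traffic and breaks if the data itself contains the placeholder. For contrast, a standard in-place Lomuto partition:

def partition(A, p, r):
    # Lomuto scheme: A[r] is the pivot; <= items are swapped to the left.
    pivot = A[r]
    i = p - 1
    for j in range(p, r):
        if A[j] <= pivot:
            i += 1
            A[i], A[j] = A[j], A[i]
    A[i + 1], A[r] = A[r], A[i + 1]
    return i + 1

def quicksort(A, p=0, r=None):
    if r is None:
        r = len(A) - 1
    if p < r:
        q = partition(A, p, r)
        quicksort(A, p, q - 1)
        quicksort(A, q + 1, r)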
id_sentence_list,id_label_list\n#id_sentence_list,id_label_list=sentence_to_id(sentence_list,label_list)\n#max_seq_length=max(len(sentence) for sentence in id_sentence_list)\nmax_seq_length=178\ndef pad_function(id_sentence_list,id_label_list):\n assert len(id_sentence_list)==len(id_label_list)\n for id_sentence,id_label in zip(id_sentence_list,id_label_list):\n assert len(id_sentence)==len(id_label)\n pad_sentences=tf.keras.preprocessing.sequence.pad_sequences(id_sentence_list,padding=\"post\",\n value=word2id[\"\"],maxlen=max_seq_length)\n pad_labels=tf.keras.preprocessing.sequence.pad_sequences(id_label_list,padding=\"post\",\n value=word2id[\"\"],maxlen=max_seq_length)\n return pad_sentences,pad_labels\n#pad_sentences,pad_labels=pad_function(id_sentence_list,id_label_list)\n\n#dataset=tf.data.Dataset.from_tensor_slices((pad_sentences,pad_labels))\n#train_batch_size=64\n#dataset=dataset.shuffle(2000).batch(batch_size=train_batch_size,drop_remainder=True)\nnum_classes=len(label2id)\nvocab_size=len(word2id)\n\nclass build_model(tf.keras.Model):\n def __init__(self,fc_dim,batch_size_,embedding_dim,bilstm_layers):\n super(build_model,self).__init__()\n self.batch_size=batch_size_\n self.fc_dim=fc_dim\n forward_layer1,backward_layer1,forward_layer2,backward_layer2=bilstm_layers\n self.Embedding_layer=tf.keras.layers.Embedding(input_dim=vocab_size,output_dim=embedding_dim,\n batch_input_shape=[self.batch_size,max_seq_length])\n self.bilstm_layer1=tf.keras.layers.Bidirectional(layer=forward_layer1,backward_layer=backward_layer1,merge_mode=\"concat\")\n self.bilstm_layer2=tf.keras.layers.Bidirectional(layer=forward_layer2,backward_layer=backward_layer2,merge_mode=\"concat\")\n self.fc_layer=tf.keras.layers.Dense(units=fc_dim,activation=\"relu\")\n self.output_layer=tf.keras.layers.Dense(units=num_classes)\n #(batch_size,max_seq_length,num_classes)\n def call(self,input_tensor):\n return self.output_layer(self.fc_layer(self.bilstm_layer2(self.bilstm_layer1(self.Embedding_layer(input_tensor)))))\n\nfc_dim=128\nembedding_dim=100\nforward_layer1=tf.keras.layers.LSTM(units=64,return_sequences=True,go_backwards=False,time_major=False)\nbackward_layer1=tf.keras.layers.LSTM(units=64,return_sequences=True,go_backwards=True,time_major=False)\nforward_layer2=tf.keras.layers.LSTM(units=128,return_sequences=True,go_backwards=False,time_major=False)\nbackward_layer2=tf.keras.layers.LSTM(units=128,return_sequences=True,go_backwards=True,time_major=False)\nbilstm_layers=(forward_layer1,backward_layer1,forward_layer2,backward_layer2)\n\n#model=build_model(fc_dim=fc_dim,batch_size_=train_batch_size,bilstm_layers=bilstm_layers,embedding_dim=embedding_dim)\ndef loss_fn(real_data,target_):\n mask=tf.math.logical_not(tf.math.equal(real_data,word2id[\"\"]))\n mask=tf.cast(mask,dtype=tf.float32)\n loss_object=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,reduction=\"none\")\n loss=loss_object(real_data,target_)\n loss=loss*mask\n return tf.reduce_mean(loss)\n\n#model.compile(loss=loss_fn,optimizer=tf.keras.optimizers.Adam())\n#model.fit(dataset,epochs=30,verbose=2)\nprint(\"*\"*200)\n#model.save_weights(filepath=\"./simple_model.ckpt\")\nnew_model=build_model(fc_dim=fc_dim,batch_size_=1,bilstm_layers=bilstm_layers,embedding_dim=embedding_dim)\nnew_model.compile(loss=loss_fn,optimizer=tf.keras.optimizers.Adam())\n\nnew_model.load_weights(filepath=\"./simple_model.ckpt\")\n\nprint(\"*\"*500)\n\n\ntest_sentence=\"武汉市是湖北省的省会,马云是阿里巴巴的创始人\"\ntest_sentence_id=[word2id.get(word,word2id[\"\"]) for word in 
test_sentence]\ntest_padding=tf.keras.preprocessing.sequence.pad_sequences([[test_sentence_id]],maxlen=178,padding=\"post\",value=0)\n\nresult=new_model(test_padding)\nassert result.shape==(1,178,num_classes)\nresult=tf.squeeze(result,axis=0)\nresult=result[:len(test_sentence)]\npredict=tf.argmax(result,axis=-1)\n\n\nfor i,j in zip(predict.numpy(),test_sentence):\n print(id2label[i],\": \",test_sentence[j])\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"237378831","text":"# Import modules.\nimport grid\n\n# Gathers input from human player.\ndef get_input() :\n print('Please select the top left point of your line.')\n column = input('\\tColumn: (a-d): ')\n row = input('\\tRow: (1-4): ')\n direction = input('\\tDirection: (h or v): ')\n \n # Start counting from 0.\n column = ord(column) - ord('a')\n row = int(row) - 1\n \n # Checks to see if player's entry is valid.\n if(column >= grid.BOARD_WIDTH or column < 0) :\n print('The column entered is invalid, it must be a value between a and d.')\n get_input()\n elif(row >= grid.BOARD_HEIGHT or row < 0) :\n print('The row entered is invalid, it must be a value between 1 and 4.')\n get_input()\n if(direction == 'h' and column == grid.BOARD_WIDTH - 1) :\n print('The line does not fit on this grid. Please enter a new line.')\n get_input()\n elif(direction == 'v' and row == grid.BOARD_HEIGHT - 1) :\n print('The line does not fit on this grid. Please enter a new line.')\n get_input()\n else :\n if(direction != 'h' and direction != 'v') :\n print('The direction entered must be horizontal or vertical.')\n get_input()\n else :\n overlap = grid.paddocks_board.has_overlap(row, column, direction)\n if(overlap == True) :\n print('The line entered overlaps another one! 
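The inference tail of the NER record above double-wraps the id list passed to pad_sequences and indexes test_sentence with a character (j is already the character yielded by zip); id2label is also never built in the visible code. A corrected sketch, assuming the label2id mapping loaded earlier in the record:

id2label = {idx: label for label, idx in label2id.items()}

test_padding = tf.keras.preprocessing.sequence.pad_sequences(
    [test_sentence_id], maxlen=178, padding="post", value=0)

result = tf.squeeze(new_model(test_padding), axis=0)[:len(test_sentence)]
predict = tf.argmax(result, axis=-1)

for label_id, char in zip(predict.numpy(), test_sentence):
    print(id2label[int(label_id)], ": ", char)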
Please place a new line.')\n get_input()\n else :\n initial = 'P'\n grid.paddocks_board.set_line(row, column, direction, initial)","sub_path":"paddocks/human.py","file_name":"human.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"579238925","text":"class Solution:\n def permuteUnique(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n solution_set = []\n\n def permutation(nums, current_permutation):\n if len(nums) == 0:\n solution_set.append(current_permutation)\n else:\n previous_value = None\n for i in range(len(nums)):\n if nums[i] == previous_value:\n continue\n else:\n next_permutation = current_permutation + [nums[i]]\n permutation(nums[:i] + nums[i+1:], next_permutation)\n previous_value = nums[i]\n\n permutation(nums, [])\n\n return solution_set\n\n","sub_path":"Codes/47_Permutations_II/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"294555127","text":"import os\nfrom collections import namedtuple\n\nfrom ..basic import cnn_text\nfrom ..basic import factorization_machine\nfrom ..basic import embedding_layer, embedding_lookup\nfrom CDRTR.utils import pkdump\n\nimport tensorflow as tf\n\nbatchConfig = namedtuple(\"batchConfig\", [\"sentc_ipt\", \"rating\"])\n\n\nclass SentiRec(object):\n def __init__(self, sentc_len, vocab_size, embd_size,\n filter_sizes, num_filter, drop_value=0.5):\n self.sentc_ipt = tf.placeholder(\n dtype=tf.int32,\n shape=[None, sentc_len],\n name=\"sentc_ipt\")\n self.rating = tf.placeholder(\n dtype=tf.float32,\n shape=[None])\n self.W = embedding_layer(vocab_size, embd_size)\n self.sentc_map = embedding_lookup(self.sentc_ipt, self.W)\n self.sentc_ft, self.drop = cnn_text(sentc_len, filter_sizes,\n num_filter, self.sentc_map)\n self.drop_value = drop_value\n self.sentc_cnn_out = tf.expand_dims(self.sentc_ft, 1)\n self.rating_prd = factorization_machine(self.sentc_cnn_out)\n diff = self.rating_prd - self.rating\n self.loss = tf.reduce_sum(tf.pow(diff, 2))\n self.mae = tf.reduce_mean(tf.abs(diff))\n self.mse = tf.reduce_mean(tf.square(diff))\n self.rmse = tf.sqrt(self.mse)\n self.optimizer = tf.train.AdamOptimizer(0.001)\n tf.summary.scalar(\"mae\", self.mae)\n tf.summary.scalar(\"rmse\", self.rmse)\n self.merged = tf.summary.merge_all()\n self.train_op = self.optimizer.minimize(self.loss)\n self.init = tf.global_variables_initializer()\n\n def initSess(self, sess):\n sess.run(self.init)\n\n def _buildDict(self, batch):\n _mapper = {\n \"sentc_ipt\": self.sentc_ipt,\n \"rating\": self.rating\n }\n feed_dict = {_mapper[k]: batch[k] for k in batch}\n feed_dict[self.drop] = self.drop_value\n return feed_dict\n\n def trainBatch(self, sess, batch):\n try:\n _, loss, mae, rmse = sess.run(\n [self.train_op, self.loss, self.mae, self.rmse],\n feed_dict=self._buildDict(batch)\n )\n except :\n pkdump(batch, \"debug.pk\")\n raise\n if not os.path.exists(\"debugnormal.pk\"):\n pkdump(batch, \"debugnormal.pk\")\n return loss, mae, rmse\n\n def getSummary(self, sess, batch):\n summary = sess.run(self.merged,\n feed_dict=self._buildDict(batch)\n )\n return summary\n\n def predict(self, sess, batch):\n pred = sess.run(self.rating_prd,\n feed_dict={\n self.sentc_ipt: batch[\"sentc_ipt\"],\n self.drop: self.drop_value}\n )\n return pred\n\n def outputVector(self, sess, batch):\n sentc_cnn_opt = 
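The permuteUnique record above sorts the input and skips any value equal to previous_value at the same recursion depth, so each distinct permutation is emitted exactly once despite duplicate inputs. A quick usage check:

result = Solution().permuteUnique([1, 1, 2])
assert sorted(result) == [[1, 1, 2], [1, 2, 1], [2, 1, 1]]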
sess.run(self.sentc_cnn_out,\n feed_dict={self.sentc_ipt: batch, self.drop:self.drop_value}\n )\n return sentc_cnn_opt\n","sub_path":"CDRTR/core/DeepModel/SentiRec/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"212404319","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 25 14:32:50 2018\n\n@author: Yung-Yu Tsai\n\nevaluate quantized testing result with custom Keras quantize layer \n\"\"\"\n\nimport keras\nfrom keras.utils import multi_gpu_model,to_categorical\nfrom simulator.models.mobilenet import QuantizedMobileNetV1FusedBN\nfrom simulator.utils_tool.dataset_setup import dataset_setup\nfrom simulator.utils_tool.confusion_matrix import show_confusion_matrix\nfrom simulator.metrics.topk_metrics import top5_acc\nimport time\nfrom simulator.memory.mem_bitmap import bitmap\nfrom simulator.memory.tile import tile, tile_FC, generate_layer_memory_mapping\nfrom simulator.testing.fault_core import generate_model_modulator\nfrom simulator.metrics.FT_metrics import acc_loss, relative_acc, pred_miss, top5_pred_miss, conf_score_vary_10, conf_score_vary_50\nfrom simulator.inference.evaluate import evaluate_FT\n\n# dimensions of our images.\nimg_width, img_height = 224, 224\n\nset_size=2\nclass_number=1000\nbatch_size=20\nmodel_word_length=16\nmodel_fractional_bit=9\nrounding_method='nearest'\nif set_size in [50,'full',None]:\n validation_data_dir = '../../dataset/imagenet_val_imagedatagenerator'\nelse:\n validation_data_dir = '../../dataset/imagenet_val_imagedatagenerator_setsize_%d'%set_size\n\n# memory fault simulation parameter\nfault_rate=0.0001\n\nrow_ifmap=98\ncol_ifmap=16*8\nword_ifmap=4\n\nrow_ofmap=98\ncol_ofmap=16*8\nword_ofmap=4\n\nrow_wght=64+1\ncol_wght=16*8\nword_wght=4\n\nmodel_wl=model_word_length\n\nmemory_column_priority=['Tm','Tc','Tr','Tn']\nmemory_row_priority=['Tr','Tm','Tc','Tn']\n\nfast_mode=True\n\n#%%\n# fault generation\n\n# model for get configuration\nmodel = QuantizedMobileNetV1FusedBN(weights='../mobilenet_1_0_224_tf_fused_BN.h5', \n nbits=model_word_length,\n fbits=model_fractional_bit, \n rounding_method=rounding_method,\n batch_size=batch_size,\n quant_mode=None)\n\nmodel_ifmap_fault_dict_list=[None for i in range(75)]\nmodel_ofmap_fault_dict_list=[None for i in range(75)] \nmodel_weight_fault_dict_list=[[None,None] for i in range(75)]\n\n# memory mapping\nGLB_wght=bitmap(row_wght, col_wght*word_wght*model_wl, wl=model_wl) # 65KB\nGLB_ifmap=bitmap(row_ifmap, col_ifmap*word_ifmap*model_wl, wl=model_wl) # 98KB\nGLB_ofmap=bitmap(row_ofmap, col_ofmap*word_ofmap*model_wl, wl=model_wl) # 98KB\n\n# assign fault dictionary\nGLB_wght.gen_bitmap_SA_fault_dict(fault_rate,fast_gen=True)\nGLB_ifmap.gen_bitmap_SA_fault_dict(fault_rate,fast_gen=True)\nGLB_ofmap.gen_bitmap_SA_fault_dict(fault_rate,fast_gen=True)\n\n#%%\n# tile setting\n\n# standard conv1\nofmap_tile_conv1=tile((1,38,38,32),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_conv1=tile((1,76,76,4),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_conv1 =tile((3,3,4,32),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW 
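The SentiRec record above feeds the pooled CNN text feature through a factorization_machine layer to regress the rating; that layer's implementation is not shown in this record. For context, the standard FM second-order interaction term and its O(nk) rewrite, sketched in numpy with illustrative names:

import numpy as np

def fm_second_order(x, V):
    # x: (n,) features, V: (n, k) latent factors.
    # Computes 0.5 * sum_f [(sum_i v_if x_i)^2 - sum_i (v_if x_i)^2],
    # which equals sum_{i<j} <v_i, v_j> x_i x_j.
    sum_then_square = np.square(V.T @ x)      # shape (k,)
    square_then_sum = (V.T ** 2) @ (x ** 2)   # shape (k,)
    return 0.5 * float(np.sum(sum_then_square - square_then_sum))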
conv2\nofmap_tile_DW2=tile((1,112,112,4),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW2=tile((1,112,112,4),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW2 =tile((3,3,4,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv3\nofmap_tile_PW3=tile((1,38,38,32),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW3=tile((1,38,38,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW3 =tile((1,1,16,32),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv4\nofmap_tile_DW4=tile((1,28,28,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW4=tile((1,56,56,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW4 =tile((3,3,16,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv5\nofmap_tile_PW5=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW5=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW5 =tile((1,1,64,64),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv6\nofmap_tile_DW6=tile((1,56,56,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW6=tile((1,56,56,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW6 =tile((3,3,16,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv7\nofmap_tile_PW7=tile((1,56,56,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW7=tile((1,56,56,8),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW7 =tile((1,1,8,16),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv8\nofmap_tile_DW8=tile((1,28,28,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW8=tile((1,56,56,16),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW8 =tile((3,3,16,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv9\nofmap_tile_PW9=tile((1,28,28,32),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW9=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW9 =tile((1,1,64,32),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv10\nofmap_tile_DW10=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW10=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW10 =tile((3,3,64,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW 
conv11\nofmap_tile_PW11=tile((1,28,28,32),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW11=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW11 =tile((1,1,64,32),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv12\nofmap_tile_DW12=tile((1,14,14,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW12=tile((1,28,28,64),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW12 =tile((3,3,64,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv13\nofmap_tile_PW13=tile((1,14,14,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW13=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW13 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv14\nofmap_tile_DW14=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW14=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW14 =tile((3,3,256,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv15\nofmap_tile_PW15=tile((1,14,14,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW15=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW15 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv16\nofmap_tile_DW16=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW16=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW16 =tile((3,3,256,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv17\nofmap_tile_PW17=tile((1,14,14,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW17=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW17 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv18\nofmap_tile_DW18=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW18=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW18 =tile((3,3,256,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv19\nofmap_tile_PW19=tile((1,14,14,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW19=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW19 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW 
conv20\nofmap_tile_DW20=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW20=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW20 =tile((3,3,256,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv21\nofmap_tile_PW21=tile((1,14,14,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW21=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW21 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv22\nofmap_tile_DW22=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW22=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW22 =tile((3,3,256,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv23\nofmap_tile_PW23=tile((1,14,14,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW23=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW23 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv24\nofmap_tile_DW24=tile((1,7,7,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW24=tile((1,14,14,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW24 =tile((3,3,256,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv25\nofmap_tile_PW25=tile((1,7,7,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW25=tile((1,7,7,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW25 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# DW conv26\nofmap_tile_DW26=tile((1,7,7,512),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_DW26=tile((1,7,7,512),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_DW26 =tile((3,3,512,1),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# PW conv27\nofmap_tile_PW27=tile((1,7,7,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_PW27=tile((1,7,7,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_PW27 =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n# Pred conv (fc1000)\nofmap_tile_fc=tile((1,1,1,128),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nifmap_tile_fc=tile((1,1,1,256),is_fmap=True,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\nwght_tile_fc =tile((1,1,256,128),is_fmap=False,wl=model_wl,row_prior=memory_row_priority,col_prior=memory_column_priority)\n\n#%%\n# generate fault 
dictionary\nmodel_ifmap_fault_dict_list[2],model_ofmap_fault_dict_list[2],model_weight_fault_dict_list[2]\\\n=generate_layer_memory_mapping(model.layers[2],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_conv1,wght_tile_conv1,ofmap_tile_conv1,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[5],model_ofmap_fault_dict_list[5],model_weight_fault_dict_list[5]\\\n=generate_layer_memory_mapping(model.layers[5],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW2,wght_tile_DW2,ofmap_tile_DW2,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[7],model_ofmap_fault_dict_list[7],model_weight_fault_dict_list[7]\\\n=generate_layer_memory_mapping(model.layers[7],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW3,wght_tile_PW3,ofmap_tile_PW3,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[10],model_ofmap_fault_dict_list[10],model_weight_fault_dict_list[10]\\\n=generate_layer_memory_mapping(model.layers[10],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW4,wght_tile_DW4,ofmap_tile_DW4,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[12],model_ofmap_fault_dict_list[12],model_weight_fault_dict_list[12]\\\n=generate_layer_memory_mapping(model.layers[12],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW5,wght_tile_PW5,ofmap_tile_PW5,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[15],model_ofmap_fault_dict_list[15],model_weight_fault_dict_list[15]\\\n=generate_layer_memory_mapping(model.layers[15],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW6,wght_tile_DW6,ofmap_tile_DW6,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[17],model_ofmap_fault_dict_list[17],model_weight_fault_dict_list[17]\\\n=generate_layer_memory_mapping(model.layers[17],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW7,wght_tile_PW7,ofmap_tile_PW7,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[20],model_ofmap_fault_dict_list[20],model_weight_fault_dict_list[20]\\\n=generate_layer_memory_mapping(model.layers[20],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW8,wght_tile_DW8,ofmap_tile_DW8,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[22],model_ofmap_fault_dict_list[22],model_weight_fault_dict_list[22]\\\n=generate_layer_memory_mapping(model.layers[22],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW9,wght_tile_PW9,ofmap_tile_PW9,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[25],model_ofmap_fault_dict_list[25],model_weight_fault_dict_list[25]\\\n=generate_layer_memory_mapping(model.layers[25],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW10,wght_tile_DW10,ofmap_tile_DW10,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[27],model_ofmap_fault_dict_list[27],model_weight_fault_dict_list[27]\\\n=generate_layer_memory_mapping(model.layers[27],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW11,wght_tile_PW11,ofmap_tile_PW11,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[30],model_ofmap_fault_dict_list[30],model_weight_fault_dict_list[30]\\\n=generate_layer_memory_mapping(model.layers[30],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW12,wght_tile_DW12,ofmap_tile_DW12,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[32],model_ofmap_fault_dict_list[32],model_weight_fault_dict_list[32]\\\n=generate_layer_memory_mapping(model.layers[32],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW13,wght_tile_PW13,ofmap_tile_PW13,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[35],model_ofmap_fault_dict_list[35],model_weight_fault_dict_list[35]\\\n=generate_layer_memory_mapping(model.layers[35],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW14,wght_tile_DW14,ofmap_tile_DW14,\n 
fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[37],model_ofmap_fault_dict_list[37],model_weight_fault_dict_list[37]\\\n=generate_layer_memory_mapping(model.layers[37],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW15,wght_tile_PW15,ofmap_tile_PW15,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[40],model_ofmap_fault_dict_list[40],model_weight_fault_dict_list[40]\\\n=generate_layer_memory_mapping(model.layers[40],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW16,wght_tile_DW16,ofmap_tile_DW16,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[42],model_ofmap_fault_dict_list[42],model_weight_fault_dict_list[42]\\\n=generate_layer_memory_mapping(model.layers[42],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW17,wght_tile_PW17,ofmap_tile_PW17,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[45],model_ofmap_fault_dict_list[45],model_weight_fault_dict_list[45]\\\n=generate_layer_memory_mapping(model.layers[45],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW18,wght_tile_DW18,ofmap_tile_DW18,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[47],model_ofmap_fault_dict_list[47],model_weight_fault_dict_list[47]\\\n=generate_layer_memory_mapping(model.layers[47],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW19,wght_tile_PW19,ofmap_tile_PW19,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[50],model_ofmap_fault_dict_list[50],model_weight_fault_dict_list[50]\\\n=generate_layer_memory_mapping(model.layers[50],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW20,wght_tile_DW20,ofmap_tile_DW20,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[52],model_ofmap_fault_dict_list[52],model_weight_fault_dict_list[52]\\\n=generate_layer_memory_mapping(model.layers[52],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW21,wght_tile_PW21,ofmap_tile_PW21,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[55],model_ofmap_fault_dict_list[55],model_weight_fault_dict_list[55]\\\n=generate_layer_memory_mapping(model.layers[55],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW22,wght_tile_DW22,ofmap_tile_DW22,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[57],model_ofmap_fault_dict_list[57],model_weight_fault_dict_list[57]\\\n=generate_layer_memory_mapping(model.layers[57],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW23,wght_tile_PW23,ofmap_tile_PW23,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[60],model_ofmap_fault_dict_list[60],model_weight_fault_dict_list[60]\\\n=generate_layer_memory_mapping(model.layers[60],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW24,wght_tile_DW24,ofmap_tile_DW24,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[62],model_ofmap_fault_dict_list[62],model_weight_fault_dict_list[62]\\\n=generate_layer_memory_mapping(model.layers[62],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW25,wght_tile_PW25,ofmap_tile_PW25,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[65],model_ofmap_fault_dict_list[65],model_weight_fault_dict_list[65]\\\n=generate_layer_memory_mapping(model.layers[65],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_DW26,wght_tile_DW26,ofmap_tile_DW26,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[67],model_ofmap_fault_dict_list[67],model_weight_fault_dict_list[67]\\\n=generate_layer_memory_mapping(model.layers[67],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n ifmap_tile_PW27,wght_tile_PW27,ofmap_tile_PW27,\n fast_mode=fast_mode)\nmodel_ifmap_fault_dict_list[72],model_ofmap_fault_dict_list[72],model_weight_fault_dict_list[72]\\\n=generate_layer_memory_mapping(model.layers[72],\n GLB_ifmap,GLB_wght,GLB_ofmap,\n 
ifmap_tile_fc,wght_tile_fc,ofmap_tile_fc,\n                               fast_mode=fast_mode)\n\n#%%\n# generate modulator\n\nmodel_ifmap_fault_dict_list, model_ofmap_fault_dict_list, model_weight_fault_dict_list\\\n=generate_model_modulator(model,\n                          model_word_length,\n                          model_fractional_bit,\n                          model_ifmap_fault_dict_list, \n                          model_ofmap_fault_dict_list, \n                          model_weight_fault_dict_list,\n                          fast_gen=True)\n\n#%%\n# model setup\n\nprint('Building model...')\n\nt = time.time()\n\nmodel = QuantizedMobileNetV1FusedBN(weights='../mobilenet_1_0_224_tf_fused_BN.h5', \n                                    nbits=model_word_length,\n                                    fbits=model_fractional_bit, \n                                    rounding_method=rounding_method,\n                                    batch_size=batch_size,\n                                    quant_mode='hybrid',\n                                    ifmap_fault_dict_list=model_ifmap_fault_dict_list,\n                                    ofmap_fault_dict_list=model_ofmap_fault_dict_list,\n                                    weight_fault_dict_list=model_weight_fault_dict_list)\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', top5_acc])\n\nt = time.time()-t\n\nmodel.summary()\n\nprint('model build time: %f s'%t)\n\n# multi GPU model\n\n#print('Building multi GPU model...')\n#\n#t = time.time()\n#parallel_model = multi_gpu_model(model, gpus=2)\n#parallel_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', top5_acc])\n#\n#parallel_model.summary()\n#\n#t = time.time()-t\n#\n#print('multi GPU model build time: %f s'%t)\n\n#%%\n#dataset setup\n\nprint('preparing dataset...')\nx_train, x_test, y_train, y_test, class_indices, datagen, input_shape = dataset_setup('ImageDataGenerator', img_rows = img_width, img_cols = img_height, batch_size = batch_size, data_augmentation = False, data_dir = validation_data_dir)\nprint('dataset ready')\n\n\n#%%\n# test\n\nt = time.time()\nprint('evaluating...')\n\nfrom keras.losses import categorical_crossentropy\n#prediction = parallel_model.predict_generator(datagen, verbose=1, steps=len(datagen))\nprediction = model.predict_generator(datagen, verbose=1, steps=len(datagen))\ntest_result = evaluate_FT('mobilenet',prediction=prediction,test_label=to_categorical(datagen.classes,1000),loss_function=categorical_crossentropy,metrics=['accuracy',top5_acc,acc_loss,relative_acc,pred_miss,top5_pred_miss,conf_score_vary_10,conf_score_vary_50],fuseBN=True,setsize=set_size)\n\nt = time.time()-t\nprint('\\nruntime: %f s'%t)\nfor key in test_result.keys():\n    print('Test %s\\t:'%key, test_result[key])\n\n#%%\n# draw confusion matrix\n\n#print('\\n')\n#prediction = model.predict_generator(datagen, verbose=1, steps=len(datagen))\n#prediction = np.argmax(prediction, axis=1)\n#\n#show_confusion_matrix(datagen.classes,prediction,datagen.class_indices.keys(),'Confusion Matrix',figsize=(10,8),normalize=False,big_matrix=True)\n\n","sub_path":"memory_fault_sim_MobileNet_v1_ILSVRC2012_val.py","file_name":"memory_fault_sim_MobileNet_v1_ILSVRC2012_val.py","file_ext":"py","file_size_in_byte":25799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"610977869","text":"def countBombs(y,x):\n\tcount = 0\n\tfor n in range(y-1,y+2):\n\t\tfor m in range(x-1,x+2):\n\t\t\t# print(n)\n\t\t\t# print(m)\n\t\t\tif n < 1 or m < 0\\\n\t\t\t or n > int(inputsMassNum[0]) or m > int(inputsMassNum[1])-1:\n\t\t\t\tpass\n\t\t\telif inputsBombs[n][m] == \"#\":\n\t\t\t\tcount += 1\n\n\treturn count\n\t\n\ninputsBombs = [[]] \n\n# assumes a 3x3 grid\nif __name__ == '__main__':\n\tinput1 = input()\n\tinputsMassNum = input1.split()# e.g. \"3 3\"\n\n\tfor i in range(0,int(inputsMassNum[0])):# 0~3 (3 iterations)\n\t\t# print(i)\n\t\tinput2 = 
input()#\n\t\tinputsBombs.append(list(input2)) # [0][] is empty\n\t\t# [ ] \n\t\t#\t###\n\t\t#\t#.#\n\t\t#\t###\n\n\t\t# appended to inputsBombs starting at element [1]\n\t\t# print(inputsBombs[i+1])\n\t\t# print(\"\")\n\n\tfor y in range(1,int(inputsMassNum[0])+1):#0~3\n\t\tfor x in range(0,int(inputsMassNum[1])):\n\t\t\t# print(inputsBombs[y][x])\n\t\t\tif inputsBombs[y][x] == \".\":#\n\t\t\t\ttmp = countBombs(y,x)\n\t\t\t\tinputsBombs[y][x] = tmp\n\n\n\tinputsBombs.pop(0)\n\t# for i in range(0,int(inputsMassNum[0])):\n\t# \tprint(inputsBombs)\n\n\tfor i in range(0,int(inputsMassNum[0])):\n\t\tfor j in range(0,int(inputsMassNum[1])):\n\t\t\tprint(inputsBombs[i][j],end = \"\")\n\t\tprint(\"\")\n","sub_path":"inazuma/python/Bprob/075.py","file_name":"075.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171850778","text":"import tensorflow as tf\n\n\n# p(x_hat | z)\n\nclass Decoder(object):\n    def __init__(self, dim_output):\n        self.dim_output = dim_output\n\n    def output(self, inputs, reuse=False):\n        with tf.variable_scope('encoder', reuse=reuse):\n            z_reshape = tf.expand_dims(inputs, 1)  # N, 1, 50\n\n            lstm_cell = tf.contrib.rnn.OutputProjectionWrapper(\n                tf.nn.rnn_cell.LSTMCell(name='basic_lstm_cell', num_units=12, activation=tf.nn.relu),\n                output_size=1024)  # N, 1, 1024\n\n            x_hat, _ = tf.nn.dynamic_rnn(lstm_cell, z_reshape, dtype=tf.float32)\n\n            x_hat_flat = tf.reshape(x_hat, shape=[-1, 1024])\n\n            # x_hat_flat = tf.contrib.layers.fully_connected(inputs=inputs, num_outputs=1024, activation_fn=tf.nn.relu)\n\n            return x_hat_flat\n","sub_path":"week20/saved_model/04/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"466933214","text":"def relacion(diccionario):\n# computes the value of the inhabitants/CAS ratio\n    for key, value in diccionario.items():\n        relacion = value[0] / value[1]\n        diccionario[key][2] = relacion\n    return\n\ndef print_a(diccionario):\n# prints the output for exercise a\n    for key, value in diccionario.items():\n        print(\"there are {0} inhabitants and {1} CAS in {2}\".format(value[0], value[1], key))\n    return\n\ndef print_b(diccionario):\n# prints the output for exercise b\n    lista = []\n    lista2 = []\n    for key, value in diccionario.items():\n        lista.append(value[2])\n        lista.append(key)\n        lista2.append(lista)\n        lista = []\n    lista2.sort(reverse=True)\n    for elementos in lista2[0:5]:\n        print(\"locality: \", elementos[1], \" - ratio: \", elementos[0])\n    return \n\n# main program\ndiccionario = {}\nciclo = True\nwhile ciclo:\n    localidades = input(\"enter locality: \")\n    cantidad_de_habitantes = int(input(\"enter number of inhabitants: \"))\n    cantidad_de_CAS = int(input(\"enter number of CAS: \"))\n    if localidades in diccionario:\n        diccionario[localidades][0] += cantidad_de_habitantes\n        diccionario[localidades][1] += cantidad_de_CAS\n    else:\n        diccionario[localidades] = [cantidad_de_habitantes, cantidad_de_CAS, 0]\n    continuar = input(\"do you want to continue?(yes/no)\")\n    if continuar == \"no\":\n        relacion(diccionario)\n        ciclo = False\nimprimir_a = print_a(diccionario)\nimprimir_b = print_b(diccionario)","sub_path":"parciales/primer recuperatorio 2018 - ejercicio 3.py","file_name":"primer recuperatorio 2018 - ejercicio 3.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"32241487","text":"\nfrom commons.method_executor 
import Method_Executor\nfrom sframe import SFrame\nimport time\nimport pandas as pd\nimport editdistance\nimport networkx as nx\nfrom sklearn.metrics import precision_score, accuracy_score, recall_score, precision_recall_fscore_support, f1_score\nimport phonetics\n\n__author__ = \"Aviad Elyashar\"\n\n\nclass SimilarNameAdvisor(Method_Executor):\n def __init__(self, db):\n Method_Executor.__init__(self, db)\n self._input_directory_path = self._config_parser.eval(self.__class__.__name__, \"input_directory_path\")\n self._target_file_name = self._config_parser.eval(self.__class__.__name__, \"target_file_name\")\n self._ground_truth_file_name = self._config_parser.eval(self.__class__.__name__, \"ground_truth_file_name\")\n self._output_directory_path = self._config_parser.eval(self.__class__.__name__, \"output_directory_path\")\n self._results_file_name = self._config_parser.eval(self.__class__.__name__, \"results_file_name\")\n self._ranking_function = self._config_parser.eval(self.__class__.__name__, \"ranking_function\")\n\n def _read_csv_file(self, delimiter=\"\\t\"):\n print(\"Reading CSV file \")\n begin_time = time.time()\n # for Micky's big file\n # wikitree_sf = SFrame.read_csv(self._input_directory_path + self._target_file_name, delimiter=\"\\t\")\n wikitree_sf = SFrame.read_csv(self._input_directory_path + self._target_file_name, delimiter=delimiter)\n end_time = time.time()\n run_time = end_time - begin_time\n print(run_time)\n return wikitree_sf\n\n\n def create_distinct_name_list(self):\n names_df = pd.read_csv(self._input_directory_path + self._target_file_name)\n child_first_name_series = names_df[\"Child_First_Name\"]\n child_first_names = child_first_name_series.tolist()\n\n ancestor_first_name_series = names_df[\"Ancestor_First_Name\"]\n ancestor_first_names = ancestor_first_name_series.tolist()\n\n all_names = child_first_names + ancestor_first_names\n all_names = list(set(all_names))\n sorted_names = [name for name in all_names if len(name) > 1]\n sorted_names = sorted(sorted_names)\n sorted_names_df = pd.DataFrame(sorted_names, columns=['Name:'])\n sorted_names_df.to_csv(self._output_directory_path + self._results_file_name, index=False)\n\n def provide_suggestions(self):\n ground_truth_df = pd.read_csv(self._input_directory_path + self._ground_truth_file_name)\n original_names_series = ground_truth_df[\"Name\"]\n original_names = original_names_series.tolist()\n original_names = list(set(original_names))\n\n original_names = sorted(original_names)\n\n edges_df = pd.read_csv(self._input_directory_path + self._target_file_name)\n name_graph = nx.from_pandas_edgelist(edges_df, 'Ancestor_First_Name',\n 'Child_First_Name', ['count'])\n\n dfs = []\n\n for i, original_name in enumerate(original_names):\n print(\"\\rSuggesting candidates for: {0} {1}/{2}\".format(original_name, i, len(original_names)), end='')\n\n if name_graph.has_node(original_name):\n #candidates_df = self._suggest_names_for_original_name(name_graph, original_name)\n candidates_df = getattr(self, self._ranking_function)(name_graph, original_name)\n dfs.append(candidates_df)\n\n results_df = pd.concat(dfs)\n results_df = results_df.sort_values(by=['Original', 'Rank'])\n results_df.to_csv(self._output_directory_path + self._results_file_name, index=False)\n print(\"Done provide_suggestions!!!!!\")\n return results_df, ground_truth_df\n\n\n def provide_suggestions_save_with_constant_name(self):\n ground_truth_df = pd.read_csv(self._input_directory_path + self._ground_truth_file_name)\n original_names_series = 
ground_truth_df[\"Name\"]\n original_names = original_names_series.tolist()\n original_names = list(set(original_names))\n\n original_names = sorted(original_names)\n\n edges_df = pd.read_csv(self._input_directory_path + self._target_file_name)\n name_graph = nx.from_pandas_edgelist(edges_df, 'Ancestor_First_Name',\n 'Child_First_Name', ['count'])\n\n dfs = []\n\n for i, original_name in enumerate(original_names):\n print(\"\\rSuggesting candidates for: {0} {1}/{2}\".format(original_name, i, len(original_names)), end='')\n\n if name_graph.has_node(original_name):\n #candidates_df = self._suggest_names_for_original_name(name_graph, original_name)\n candidates_df = getattr(self, self._ranking_function)(name_graph, original_name)\n dfs.append(candidates_df)\n\n results_df = pd.concat(dfs)\n results_df = results_df.sort_values(by=['Original', 'Rank'])\n results_df.to_csv(self._output_directory_path + \"suggestions.csv\", index=False)\n print(\"Done provide_suggestions!!!!!\")\n return results_df, ground_truth_df\n\n #\n # Recieve the graph of father and son edit distance 1 until 3.\n # The ranking is according to double metaphone from the original name with edit distance.\n #\n def suggest_name_based_graph_ranking_by_minimal_edit_distance_of_double_metaphone(self, name_graph, original_name):\n nodes = nx.single_source_shortest_path_length(name_graph, original_name, 3)\n nodes = list(nodes.items())\n\n original_name_series = [original_name] * len(nodes)\n candidates_df = pd.DataFrame(nodes, columns=['Candidate', 'Order'])\n candidates_df['Original'] = original_name_series\n candidates_df = candidates_df[['Original', 'Candidate', 'Order']]\n\n candidates_df = candidates_df[candidates_df[\"Order\"] != 0]\n candidates_df['Double_Metaphone_Primary_Original_Name'], \\\n candidates_df['Double_Metaphone_Secondary_Original_Name'] = list(zip(*candidates_df.apply(\n lambda x: get_phonetics_double_metaphone(x[\"Original\"]),\n axis=1)))\n candidates_df['Double_Metaphone_Primary_Candidate'], candidates_df['Double_Metaphone_Secondary_Candidate'] = list(zip(*candidates_df.apply(\n lambda x: get_phonetics_double_metaphone(x[\"Candidate\"]),\n axis=1)))\n candidates_df[\"Edit_Distance_Primary_DM_Original_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Primary_Original_Name\"], x[\"Double_Metaphone_Primary_Candidate\"]),\n axis=1)\n candidates_df[\"Edit_Distance_Secondary_DM_Original_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Secondary_Original_Name\"], x[\"Double_Metaphone_Secondary_Candidate\"]),\n axis=1)\n\n candidates_df[\"Edit_Distance_Primary_DM_Original_Secondary_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Primary_Original_Name\"],\n x[\"Double_Metaphone_Secondary_Candidate\"]),\n axis=1)\n candidates_df[\"Edit_Distance_Secondary_DM_Original_Primary_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Secondary_Original_Name\"],\n x[\"Double_Metaphone_Primary_Candidate\"]),\n axis=1)\n #candidates_df.to_csv(self._output_directory_path + \"Metaphone_Edit_distance_graph.csv\")\n candidates_df[\"Min_Edit_Distance_of_DM\"] = \\\n candidates_df.apply(lambda x: find_positive_min_value(x[\"Edit_Distance_Primary_DM_Original_Candidate\"],\n x[\"Edit_Distance_Secondary_DM_Original_Candidate\"],\n x[\"Edit_Distance_Primary_DM_Original_Secondary_Candidate\"],\n x[\"Edit_Distance_Secondary_DM_Original_Primary_Candidate\"]),\n 
axis=1)\n\n candidates_df[\"Rank\"] = candidates_df[\"Min_Edit_Distance_of_DM\"]\n #candidates_df = candidates_df.sort_values(by='Min_Edit_Distance_of_DM')\n candidates_df = candidates_df.sort_values(by='Rank')\n #head_candidates_df = candidates_df.head(10)\n return candidates_df\n\n #\n # Order^2 * Edit Distance\n #\n def suggest_names_by_order_2_and_ED(self, name_graph, original_name):\n nodes = nx.single_source_shortest_path_length(name_graph, original_name, 3)\n nodes = list(nodes.items())\n\n original_name_series = [original_name] * len(nodes)\n candidates_df = pd.DataFrame(nodes, columns=['Candidate', 'Order'])\n candidates_df['Original'] = original_name_series\n candidates_df = candidates_df[['Original', 'Candidate', 'Order']]\n\n candidates_df = candidates_df[candidates_df[\"Order\"] != 0]\n\n candidates_df['Edit_Distance'] = candidates_df.apply(lambda x: calculate_edit_distance(x[\"Original\"], x[\"Candidate\"]),\n axis=1)\n candidates_df['Shortest_Path'] = candidates_df.apply(\n lambda x: calculate_shortest_path(x[\"Original\"], x[\"Candidate\"], name_graph), axis=1)\n candidates_df['Rank'] = candidates_df.apply(lambda x: rank_candidate(x[\"Edit_Distance\"], x[\"Order\"], x[\"Shortest_Path\"]),\n axis=1)\n\n candidates_df = candidates_df.sort_values(by='Rank')\n #head_candidates_df = candidates_df.head(10)\n #return head_candidates_df\n return candidates_df\n\n # Order * Edit Distance\n def suggest_name_by_edit_distance_and_order(self, name_graph, original_name):\n nodes = nx.single_source_shortest_path_length(name_graph, original_name, 3)\n nodes = list(nodes.items())\n\n original_name_series = [original_name] * len(nodes)\n candidates_df = pd.DataFrame(nodes, columns=['Candidate', 'Order'])\n candidates_df['Original'] = original_name_series\n candidates_df = candidates_df[['Original', 'Candidate', 'Order']]\n\n candidates_df = candidates_df[candidates_df[\"Order\"] != 0]\n\n candidates_df['Edit_Distance'] = candidates_df.apply(lambda x: calculate_edit_distance(x[\"Original\"], x[\"Candidate\"]),\n axis=1)\n candidates_df['Rank'] = candidates_df.apply(lambda x: rank_candidate_ED_and_order(x[\"Edit_Distance\"], x[\"Order\"]),\n axis=1)\n\n candidates_df = candidates_df.sort_values(by='Rank')\n #head_candidates_df = candidates_df.head(10)\n #return head_candidates_df\n return candidates_df\n\n\n # Order * Edit Distance\n def suggest_name_by_edit_distance_and_order_and_ED_of_DM(self, name_graph, original_name):\n nodes = nx.single_source_shortest_path_length(name_graph, original_name, 3)\n nodes = list(nodes.items())\n\n original_name_series = [original_name] * len(nodes)\n candidates_df = pd.DataFrame(nodes, columns=['Candidate', 'Order'])\n candidates_df['Original'] = original_name_series\n candidates_df = candidates_df[['Original', 'Candidate', 'Order']]\n\n candidates_df = candidates_df[candidates_df[\"Order\"] != 0]\n\n candidates_df['Edit_Distance'] = candidates_df.apply(\n lambda x: calculate_edit_distance(x[\"Original\"], x[\"Candidate\"]),\n axis=1)\n\n candidates_df['Double_Metaphone_Primary_Original_Name'], \\\n candidates_df['Double_Metaphone_Secondary_Original_Name'] = list(zip(*candidates_df.apply(\n lambda x: get_phonetics_double_metaphone(x[\"Original\"]),\n axis=1)))\n candidates_df['Double_Metaphone_Primary_Candidate'], candidates_df['Double_Metaphone_Secondary_Candidate'] = list(zip(*candidates_df.apply(\n lambda x: get_phonetics_double_metaphone(x[\"Candidate\"]),\n axis=1)))\n candidates_df[\"Edit_Distance_Primary_DM_Original_Candidate\"] = \\\n 
candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Primary_Original_Name\"], x[\"Double_Metaphone_Primary_Candidate\"]),\n axis=1)\n candidates_df[\"Edit_Distance_Secondary_DM_Original_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Secondary_Original_Name\"], x[\"Double_Metaphone_Secondary_Candidate\"]),\n axis=1)\n\n candidates_df[\"Edit_Distance_Primary_DM_Original_Secondary_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Primary_Original_Name\"],\n x[\"Double_Metaphone_Secondary_Candidate\"]),\n axis=1)\n candidates_df[\"Edit_Distance_Secondary_DM_Original_Primary_Candidate\"] = \\\n candidates_df.apply(lambda x: calculate_edit_distance(x[\"Double_Metaphone_Secondary_Original_Name\"],\n x[\"Double_Metaphone_Primary_Candidate\"]),\n axis=1)\n #candidates_df.to_csv(self._output_directory_path + \"Metaphone_Edit_distance_graph.csv\")\n candidates_df[\"Min_Edit_Distance_of_DM\"] = \\\n candidates_df.apply(lambda x: find_positive_min_value(x[\"Edit_Distance_Primary_DM_Original_Candidate\"],\n x[\"Edit_Distance_Secondary_DM_Original_Candidate\"],\n x[\"Edit_Distance_Primary_DM_Original_Secondary_Candidate\"],\n x[\"Edit_Distance_Secondary_DM_Original_Primary_Candidate\"]),\n axis=1)\n\n\n candidates_df['Rank'] = candidates_df.apply(\n lambda x: rank_candidate_ED_and_order_and_ED_of_DM(x[\"Edit_Distance\"], x[\"Order\"], x[\"Min_Edit_Distance_of_DM\"]), axis=1)\n\n candidates_df = candidates_df.sort_values(by='Rank')\n #head_candidates_df = candidates_df.head(10)\n #return head_candidates_df\n return candidates_df\n\n def compare_suggestions_with_ground_truth(self):\n ground_truth_df = pd.read_csv(self._input_directory_path + self._ground_truth_file_name)\n suggestions_df = pd.read_csv(self._input_directory_path + self._target_file_name)\n suggestions_with_ground_truth_df = self._compare_suggestion_with_ground_truth_by_provided_dfs(suggestions_df, ground_truth_df)\n return suggestions_with_ground_truth_df\n\n def _compare_suggestion_with_ground_truth_by_provided_dfs(self, suggestions_df, ground_truth_df):\n suggestions_df['Is_Original_Synonym'] = suggestions_df.apply(\n lambda x: compare_suggestion(x[\"Original\"], x[\"Candidate\"], ground_truth_df),\n axis=1)\n\n suggestions_df.to_csv(self._output_directory_path + \"suggestions_with_ground_truth.csv\", index=False)\n return suggestions_df\n\n\n\n def calculate_performance_for_suggestions(self):\n suggestions_df = pd.read_csv(self._input_directory_path + self._target_file_name)\n ground_truth_df = pd.read_csv(self._input_directory_path + self._ground_truth_file_name)\n self._calculate_performance(suggestions_df, ground_truth_df)\n\n\n def _calculate_performance(self, suggestions_df, ground_truth_df):\n source_names_series = suggestions_df[\"Original\"]\n # source_names_series = suggestions_df[\"Source_Name\"]\n source_names = source_names_series.tolist()\n source_names = list(set(source_names))\n source_names = sorted(source_names)\n\n final_results = []\n for i, source_name in enumerate(source_names):\n print(\"\\rFirst Name: {0} {1}/{2}\".format(source_name, i, len(source_names)), end='')\n\n source_name_results_df = suggestions_df[suggestions_df[\"Original\"] == source_name]\n # source_name_results_df = suggestions_df[suggestions_df[\"Source_Name\"] == source_name]\n predictions = source_name_results_df[\"Is_Original_Synonym\"]\n\n num_of_rows = source_name_results_df.shape[0]\n actual = [1] * num_of_rows\n\n accuracy = 
accuracy_score(actual, predictions)\n predictions_10 = predictions[0:10]\n actual_10 = actual[0:10]\n accuracy_10 = accuracy_score(actual_10, predictions_10)\n\n f1 = f1_score(actual, predictions)\n predictions_10 = predictions[0:10]\n actual_10 = actual[0:10]\n f1_10 = f1_score(actual_10, predictions_10)\n\n precison = precision_score(actual, predictions, average='micro')\n\n precison_1, precison_2, precison_3, precison_5, precision_10 = self._calculte_precision_at(actual,\n predictions)\n\n source_name_ground_truth_df = ground_truth_df[ground_truth_df[\"Name\"] == source_name]\n source_name_num_of_relevant_synonyms = source_name_ground_truth_df.shape[0]\n\n num_of_relevant_retrieved_at_10 = predictions_10.sum()\n num_of_retrieved_at_10 = predictions_10.count()\n\n num_of_relevant_retrieved = predictions.sum()\n num_of_retrieved = predictions.count()\n\n\n\n recall_related_to_ground_truth = -1\n if source_name_num_of_relevant_synonyms > 0:\n recall_related_to_ground_truth = num_of_relevant_retrieved / float(source_name_num_of_relevant_synonyms)\n\n recall_1, recall_2, recall_3, recall_5, recall_10 = self._calculate_recall_at(predictions,\n source_name_num_of_relevant_synonyms)\n\n # precision_related_to_ground_truth = num_of_relevant_retrieved / float(num_of_retrieved)\n\n # recall = recall_score(actual, predictions)\n\n result_tuple = (source_name, num_of_relevant_retrieved, num_of_retrieved, num_of_relevant_retrieved_at_10,\n num_of_retrieved_at_10,\n source_name_num_of_relevant_synonyms, accuracy, accuracy_10, f1, f1_10,\n precison_1, precison_2, precison_3, precison_5, precision_10, precison,\n recall_1, recall_2, recall_3, recall_5, recall_10, recall_related_to_ground_truth)\n final_results.append(result_tuple)\n\n final_results_df = pd.DataFrame(final_results, columns=['Source_Name',\n 'Num of Relevant Retrieved',\n 'Num of Retrieved',\n 'Num of Relevant Retrieved@10',\n 'Num of Retrieved@10',\n 'Total Num of Relevant in Ground Truth',\n 'Accuracy',\n 'Accuracy@10',\n 'F1',\n 'F1@10',\n 'Precision@1',\n 'Precision@2',\n 'Precision@3',\n 'Precision@5',\n 'Precision@10',\n 'Precision',\n 'Recall@1',\n 'Recall@2',\n 'Recall@3',\n 'Recall@5',\n 'Recall@10',\n 'Recall'])\n final_results_df.to_csv(self._output_directory_path + self._results_file_name, index=False)\n\n print(\"Done!!!\")\n\n def _calculte_precision_at(self, actual, predictions):\n # precision_at_results = ()\n # for num in [1, 2, 3, 5]:\n # predictions_num = predictions.head(num)\n # actual_num = actual.head(num)\n # precison_num = precision_score(actual_num, predictions_num, average='micro')\n # precision_at_results = precision_at_results + (precison_num,)\n # return precision_at_results\n\n predictions_1 = predictions[0:1]\n actual_1 = actual[0:1]\n precison_1 = precision_score(actual_1, predictions_1, average='micro')\n\n predictions_2 = predictions[0:2]\n actual_2 = actual[0:2]\n precison_2 = precision_score(actual_2, predictions_2, average='micro')\n\n predictions_3 = predictions[0:3]\n actual_3 = actual[0:3]\n precison_3 = precision_score(actual_3, predictions_3, average='micro')\n\n predictions_5 = predictions[0:5]\n actual_5 = actual[0:5]\n precison_5 = precision_score(actual_5, predictions_5, average='micro')\n\n predictions_10 = predictions[0:10]\n actual_10 = actual[0:10]\n precison_10 = precision_score(actual_10, predictions_10, average='micro')\n\n return precison_1, precison_2, precison_3, precison_5, precison_10\n\n def _calculate_recall_at(self, predictions, source_name_num_of_relevant_synonyms):\n 
num_of_relevant_retrieved_1 = predictions[0:1].sum()\n recall_1 = num_of_relevant_retrieved_1 / float(source_name_num_of_relevant_synonyms)\n\n num_of_relevant_retrieved_2 = predictions[0:2].sum()\n recall_2 = num_of_relevant_retrieved_2 / float(source_name_num_of_relevant_synonyms)\n\n num_of_relevant_retrieved_3 = predictions[0:3].sum()\n recall_3 = num_of_relevant_retrieved_3 / float(source_name_num_of_relevant_synonyms)\n\n num_of_relevant_retrieved_5 = predictions[0:5].sum()\n recall_5 = num_of_relevant_retrieved_5 / float(source_name_num_of_relevant_synonyms)\n\n num_of_relevant_retrieved_10 = predictions[0:10].sum()\n recall_10 = num_of_relevant_retrieved_10 / float(source_name_num_of_relevant_synonyms)\n\n return recall_1, recall_2, recall_3, recall_5, recall_10\n\n def create_suggestion_find_hits_in_ground_truth_and_calculate_performance(self):\n suggestions_df, ground_truth_df = self.provide_suggestions_save_with_constant_name()\n suggestions_with_ground_truth_df = self._compare_suggestion_with_ground_truth_by_provided_dfs(suggestions_df, ground_truth_df)\n self._calculate_performance(suggestions_with_ground_truth_df, ground_truth_df)\n\n\n\ndef compare_suggestion(original_name, candidate, ground_truth_df):\n result_df = ground_truth_df[\n (ground_truth_df[\"Name\"] == original_name) &\n (ground_truth_df[\"Synonym\"] == candidate)]\n\n if result_df.empty:\n return 0\n return 1\n\n\ndef calculate_shortest_path(original_name, candidate, graph):\n shortest_path = nx.shortest_path_length(graph, source=original_name, target=candidate)\n return shortest_path\n\ndef rank_candidate(edit_distance_result, order, shortest_path):\n rank = edit_distance_result * order * shortest_path\n return rank\n\ndef rank_candidate_ED_and_order(edit_distance_result, order):\n rank = edit_distance_result * order\n return rank\n\ndef rank_candidate_ED_and_order_and_ED_of_DM(edit_distance_result, order, min_edit_distance_of_DM):\n rank = edit_distance_result * order * (min_edit_distance_of_DM + 1)\n return rank\n\nname1_name2_edit_distance = {}\ndef calculate_edit_distance(name1, name2):\n if not name1 or not name2:\n return -1\n\n name1 = name1.lower()\n name2 = name2.lower()\n\n key = name1 + \" -> \" + name2\n opposite_key = name2 + \" -> \" + name1\n\n if key not in name1_name2_edit_distance:\n edit_dist = editdistance.eval(name1, name2)\n name1_name2_edit_distance[key] = edit_dist\n name1_name2_edit_distance[opposite_key] = edit_dist\n\n return name1_name2_edit_distance[key]\n\ndef get_phonetics_double_metaphone(name):\n # if name is not None and name is not 'None' and name is not '':\n # # name = unicode(name)\n result = phonetics.dmetaphone(name)\n return result[0], result[1]\n\ndef find_positive_min_value(value1, value2, value3, value4):\n array = [value1, value2, value3, value4]\n min_value = min(i for i in array if i >= 0)\n return min_value","sub_path":"dataset_builder/similar_name_advisor/similar_name_advisor.py","file_name":"similar_name_advisor.py","file_ext":"py","file_size_in_byte":25009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"264003150","text":"#importing libraries\nimport argparse\n\ndef predict(X_test, y_test,model):\n #importing libraries\n import numpy as np\n import torch\n import torch.nn as nn\n import torch.optim as optim\n from torch.utils.data import Dataset, DataLoader\n\n #loading the model and inputs\n X_test = np.load(X_test)\n y_test = np.load(y_test)\n\n #defining neural network architecture\n class 
binaryClassification(nn.Module):\n        def __init__(self):\n            super(binaryClassification, self).__init__()\n            #number of input features is 12\n            self.layer_1 = nn.Linear(12, 16)\n            self.layer_2 = nn.Linear(16, 8)\n            self.layer_out = nn.Linear(8, 1) \n            self.relu = nn.ReLU()\n            self.dropout = nn.Dropout(p=0.1)\n            self.batchnorm1 = nn.BatchNorm1d(16)\n            self.batchnorm2 = nn.BatchNorm1d(8) \n        #feed forward network\n        def forward(self, inputs):\n            x = self.relu(self.layer_1(inputs))\n            x = self.batchnorm1(x)\n            x = self.relu(self.layer_2(x))\n            x = self.batchnorm2(x)\n            x = self.dropout(x)\n            x = self.layer_out(x)\n            return x\n    \n    #loading model\n    classifier = binaryClassification()\n    classifier.load_state_dict(torch.load(model))\n    \n    #test data\n    class testData(Dataset):\n        def __init__(self, X_data):\n            self.X_data = X_data\n\n        def __getitem__(self,index):\n            return self.X_data[index]\n\n        def __len__(self):\n            return len(self.X_data)\n\n    test_data = testData(torch.FloatTensor(X_test))\n    test_loader = DataLoader(dataset=test_data, batch_size=1, num_workers=0)\n    #function to calculate accuracy\n    def binary_acc(y_pred, y_test):\n        y_pred_tag = torch.round(torch.sigmoid(y_pred))\n        results_sum = (y_pred_tag == y_test).sum().float()\n        acc = results_sum/y_test.shape[0]\n        acc =torch.round(acc*100)\n        return acc\n\n    #test model\n    y_pred_list = []\n    classifier.eval()\n    #ensures no back propagation during testing and reduces memory usage\n    with torch.no_grad():\n        for X_batch in test_loader:\n            y_test_pred = classifier(X_batch)\n            y_test_pred = torch.sigmoid(y_test_pred)\n            y_pred_tag = torch.round(y_test_pred)\n            y_pred_list.append(y_pred_tag.cpu().numpy())\n    y_pred_list = [i.squeeze().tolist() for i in y_pred_list] \n\n    acc = binary_acc(torch.FloatTensor(y_pred_list), torch.FloatTensor(y_test))\n    accu = acc.item()\n    accuracy = accu/len(y_pred_list)\n\n    with open('results.txt', 'w') as result:\n        result.write(\" Prediction: {}, Actual: {}, Acc: {}\".format(y_pred_list,y_test,accuracy))\n    \n    print('Prediction has been saved successfully!')\n\n#defining and parsing arguments\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--X_test')\n    parser.add_argument('--y_test')\n    parser.add_argument('--model')\n    args = parser.parse_args()\n    print('Prediction has been saved successfully!')\n    predict(args.X_test, args.y_test, args.model)\n    ","sub_path":"PyTorch/components/predict_data/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"258753493","text":"import time\r\nimport clr\r\n\r\nfrom System import Action\r\nfrom System import *\r\n\r\nclr.AddReference('c:\\\\Programming\\\\DllNet\\\\x64\\\\Release\\\\McsUsbNet.dll')\r\nfrom Mcs.Usb import CMcsUsbListNet\r\nfrom Mcs.Usb import DeviceEnumNet\r\n\r\nfrom Mcs.Usb import CMeaDeviceNet\r\nfrom Mcs.Usb import McsBusTypeEnumNet\r\nfrom Mcs.Usb import DataModeEnumNet\r\nfrom Mcs.Usb import SampleSizeNet\r\n\r\ndef OnChannelData(x, cbHandle, numSamples):\r\n    data, size = device.ChannelBlock_ReadFramesUI16(0, 5000, Int32(0));\r\n    print(\"size: %d numSamples: %d Data: %04x %04x Checksum: %04x %04x %04x %04x\" % (size, numSamples, data[0], data[1], data[2], data[3], data[4], data[5]))\r\n    \r\ndef OnError(msg, info):\r\n    print(msg, info)\r\n\r\ndeviceList = CMcsUsbListNet(DeviceEnumNet.MCS_DEVICE_USB)\r\n\r\nprint(\"found %d devices\" % (deviceList.Count))\r\n\r\nfor i in range(deviceList.Count):\r\n    listEntry = 
deviceList.GetUsbListEntry(i)\r\n print(\"Device: %s Serial: %s\" % (listEntry.DeviceName,listEntry.SerialNumber))\r\n\r\n\r\ndevice = CMeaDeviceNet(McsBusTypeEnumNet.MCS_USB_BUS);\r\ndevice.ChannelDataEvent += OnChannelData\r\ndevice.ErrorEvent += OnError\r\n\r\ndevice.Connect(deviceList.GetUsbListEntry(0))\r\n\r\ndevice.SetDataMode(DataModeEnumNet.Unsigned_16bit, 0)\r\ndevice.SetNumberOfChannels(2)\r\ndevice.EnableDigitalIn(Boolean(False), UInt32(0))\r\ndevice.EnableChecksum(True, 0)\r\nprint(\"Channels in Block: \", device.GetChannelsInBlock(0))\r\ndevice.SetSelectedData(device.GetChannelsInBlock(0), 50000, 5000, SampleSizeNet.SampleSize16Unsigned, device.GetChannelsInBlock(0))\r\n\r\ndevice.StartDacq()\r\ntime.sleep(2)\r\ndevice.StopDacq()\r\ndevice.Disconnect()\r\n","sub_path":"Examples/Python/Recording.py","file_name":"Recording.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522123764","text":"from lk_flow import lk_flow\nimport util\nimport cv2\nimport numpy as np\nimport pyramid\nimport warp\nfrom hierarchical_lk import hierarchical_lk\n\ndef upscale(image):\n return (image * 255).astype(np.uint8)\n\ndef q1a():\n init = util.readImage('input/TestSeq/Shift0.png')\n final = util.readImage('input/TestSeq/ShiftR2.png')\n final2 = util.readImage('input/TestSeq/ShiftR5U5.png')\n plotDisplacements(init, final, 'output/ps5-1-a-1.png')\n plotDisplacements(init, final2, 'output/ps5-1-a-2.png')\n\ndef q1b():\n init = util.readImage('input/TestSeq/Shift0.png')\n final = util.readImage('input/TestSeq/ShiftR10.png')\n plotDisplacements(init, final, 'output/ps5-1-b-1.png')\n final = util.readImage('input/TestSeq/ShiftR20.png')\n plotDisplacements(init, final, 'output/ps5-1-b-2.png')\n final = util.readImage('input/TestSeq/ShiftR40.png')\n plotDisplacements(init, final, 'output/ps5-1-b-3.png')\n\ndef plotDisplacements(init, final, filename):\n U,V = lk_flow(init, final)\n U = upscale(U)\n V = upscale(V)\n U = cv2.cvtColor(U,cv2.COLOR_GRAY2RGB)\n V = cv2.cvtColor(V,cv2.COLOR_GRAY2RGB)\n false_color_U = cv2.applyColorMap(U, cv2.COLORMAP_JET)\n false_color_V = cv2.applyColorMap(V, cv2.COLORMAP_JET)\n false_color = np.concatenate((false_color_U, false_color_V), axis=1)\n cv2.imwrite(filename, false_color)\n\ndef main():\n init = util.readImage('input/TestSeq/Shift0.png')\n final = util.readImage('input/TestSeq/ShiftR2.png')\n #final = util.readImage('input/TestSeq/ShiftR5U5.png')\n #final = util.readImage('input/TestSeq/ShiftR10.png')\n #final = util.readImage('input/TestSeq/ShiftR20.png')\n #final = util.readImage('input/TestSeq/ShiftR40.png')\n \n U,V = lk_flow(init, final)\n #U, V = hierarchical_lk(init, final)\n\n U = upscale(U)\n V = upscale(V)\n\n U = cv2.cvtColor(U,cv2.COLOR_GRAY2RGB)\n V = cv2.cvtColor(V,cv2.COLOR_GRAY2RGB)\n\n false_color_U = cv2.applyColorMap(U, cv2.COLORMAP_JET)\n false_color_V = cv2.applyColorMap(V, cv2.COLORMAP_JET)\n \n #namedWindow(\"window\")\n cv2.imshow(\"U\", false_color_U)\n cv2.imshow(\"V\", false_color_V)\n\n import time\n time.sleep(30)\n\ndef q2a():\n image = util.readImage('input/DataSeq1/yos_img_01.jpg')\n gauPyr = pyramid.gaussPyramid(image, 3)\n q2_helper(gauPyr, 'output/ps5-2-b-1.png')\n\ndef q2b():\n image = util.readImage('input/DataSeq1/yos_img_01.jpg')\n gauPyr = pyramid.gaussPyramid(image, 3)\n lapPyr = pyramid.laplPyramid(gauPyr)\n q2_helper(lapPyr, 'output/ps5-2-b-2.png')\n\ndef q2_helper(pyramid, filename):\n rows, columns = pyramid[0].shape\n 
final_image = pyramid[0]\n for p in pyramid[1:]:\n r2, c2 = p.shape\n con = np.zeros((rows, c2))\n con[0:r2, 0:c2] = p\n final_image = np.concatenate((final_image, con), axis=1)\n cv2.imwrite(filename, upscale(final_image))\n\ndef q3a():\n image1 = util.readImage('input/DataSeq1/yos_img_01.jpg')\n image2 = util.readImage('input/DataSeq1/yos_img_02.jpg')\n image3 = util.readImage('input/DataSeq1/yos_img_03.jpg')\n q3helper(image1, image2, 1)\n q3helper(image2, image3, 5)\n image1 = util.readImage('input/DataSeq2/0.png')\n image2 = util.readImage('input/DataSeq2/1.png')\n image3 = util.readImage('input/DataSeq2/2.png')\n q3helper(image1, image2, 3)\n q3helper(image2, image3, 7)\n\ndef q3helper(image1, image2, part):\n gauPyr1 = pyramid.gaussPyramid(image1, 3)\n gauPyr2 = pyramid.gaussPyramid(image2, 3)\n for i in range(len(gauPyr1)):\n plotDisplacements(gauPyr1[i].astype(np.float32), gauPyr2[i].astype(np.float32), 'output/ps5-3-a-%d-%d.png' % (part, i))\n #continue\n U,V = lk_flow(gauPyr1[i].astype(np.float32), gauPyr2[i].astype(np.float32))\n warped = warp.warp(gauPyr2[i].astype(np.float32), U, V)\n diff = warped - gauPyr1[i].astype(np.float32)\n cv2.imwrite(\"output/ps5-3-a-%d-%d.png\" %(part + 1, i), upscale(diff))\n #import time\n #time.sleep(5)\n\ndef main3():\n init = util.readImage('input/TestSeq/Shift0.png')\n final = util.readImage('input/TestSeq/ShiftR2.png')\n U,V = lk_flow(init, final)\n warped = warp.warp(final, U, V)\n cv2.imshow(\"original\", init)\n cv2.imshow(\"modified\", warped)\n import time\n time.sleep(30)\n\nif __name__ == '__main__':\n #q1a()\n #q1b()\n #q2a()\n #q2b()\n q3a()\n","sub_path":"omscs/vision-4495/ps5/ps5.py","file_name":"ps5.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15224511","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"code_info\n@Time :2021 2021/4/30 8:30\n@Author :Hanabi55\n@File :model1.py\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, GRU, Embedding, Dropout\nimport matplotlib.pyplot as plt\nimport os\nimport random\nfrom trainData1 import x_train, y_train, x_test, y_test\nfrom createDict import w_to_id, id_to_w\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nrandomSeed = random.randint(1, 100)\nnp.random.seed(randomSeed)\nnp.random.shuffle(x_train)\nnp.random.seed(randomSeed)\nnp.random.shuffle(y_train)\ntf.random.set_seed(randomSeed)\n\nx_train = np.array(x_train)\ny_train = np.array(y_train)\nx_train = np.reshape(x_train, (len(x_train), 32))\nx_test = np.array(x_test)\ny_test = np.array(y_test)\nx_test = np.reshape(x_test, (len(x_test), 32))\n\nmodel = tf.keras.Sequential([\n Embedding(4294, 200),\n GRU(600, return_sequences=True),\n Dropout(0.5),\n GRU(750),\n Dropout(0.5),\n Dense(4294, activation='softmax')\n])\n\nmodel.compile(optimizer=tf.keras.optimizers.SGD(0.001),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),\n metrics=['sparse_categorical_accuracy'])\n\ncheckpoint_save_path = \"./checkpoint1~2/model1_create_1~2.ckpt\"\n\nif os.path.exists(checkpoint_save_path + '.index'):\n print('------------load the model1--------------')\n model.load_weights(checkpoint_save_path)\n\ncp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,\n save_weights_only=True,\n save_best_only=True,\n monitor='loss')\n\nhistory = model.fit(x_train, y_train, batch_size=32, epochs=50, validation_data=(x_test, y_test), validation_freq=1,\n 
callbacks=[cp_callback])\n\nmodel.summary()\n\nf_write = open('./weights1.txt', 'w')\nfor v in model.trainable_variables:\n    f_write.write(str(v.name) + '\\n')\n    f_write.write(str(v.shape) + '\\n')\n    f_write.write(str(v.numpy()) + '\\n')\nf_write.close()\n\n# visualization\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nplt.plot(loss, label='Training Loss')\nplt.plot(val_loss, label='Validation Loss')\nplt.title('Training and Validation loss')\nplt.legend()\nplt.show()\n\n\npeom_test = input(\"Enter an example poem: \")\ninputPeom = []\nprintPeom = []\nfor i in peom_test:\n    inputPeom.append(w_to_id[i])\n    # run prediction on the input\nfor i in range(16):\n    inputPeom_tmp = []\n    inputPeom_tmp = inputPeom + inputPeom_tmp\n    while len(inputPeom_tmp) != 32:\n        inputPeom_tmp.append(0)\n    inputPeom_tmp = np.reshape(inputPeom_tmp, (1, 32))\n    result = model.predict(inputPeom_tmp)\n    predicted_next_w = tf.argmax(result, axis=1)\n    predicted_next_w = int(predicted_next_w)\n    inputPeom.append(predicted_next_w)\n    printPeom.append(predicted_next_w)\n    print(id_to_w[predicted_next_w], end='')\n","sub_path":"model1.py","file_name":"model1.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387112945","text":"load(\"//rust:rust_grpc_compile.bzl\", \"rust_grpc_compile\")\nload(\"//rust:rust_proto_lib.bzl\", \"rust_proto_lib\")\nload(\"@io_bazel_rules_rust//rust:rust.bzl\", \"rust_library\")\n\ndef rust_grpc_library(**kwargs):\n    name = kwargs.get(\"name\")\n    deps = kwargs.get(\"deps\")\n    visibility = kwargs.get(\"visibility\")\n\n    name_pb = name + \"_pb\"\n    name_lib = name + \"_lib\"\n\n    rust_grpc_compile(\n        name = name_pb,\n        deps = deps,\n        transitive = True,\n        visibility = visibility,\n    )\n\n    rust_proto_lib(\n        name = name_lib,\n        compilation = name_pb,\n    )\n\n    rust_library(\n        name = name,\n        srcs = [name_pb, name_lib],\n        deps = [\n            str(Label(\"//rust/cargo:protobuf\")),\n            str(Label(\"//rust/cargo:grpc\")),\n            str(Label(\"//rust/cargo:tls_api\")),\n            str(Label(\"//rust/cargo:tls_api_stub\")),\n        ],\n        visibility = visibility,\n    )\n","sub_path":"rust/rust_grpc_library.bzl","file_name":"rust_grpc_library.bzl","file_ext":"bzl","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"538268819","text":"import re\n# import time\nfrom collections import Counter\nfrom concurrent.futures import (ProcessPoolExecutor, as_completed,\n                                ThreadPoolExecutor)\nfrom urllib.parse import urlparse\nimport plotly.graph_objects as go\nimport requests\n\nOWNER = \"Netflix\"\nTOKEN = \"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\"\nPATTERN = re.compile('\\<(.*?)\\>')\ncommit_query = \"https://api.github.com/repos/{}/{}/commits?per_page=100&page={}\"\n\n\ndef count_commits(query_url):\n    repo_counter = Counter()\n    r = requests.get(query_url, headers={\"Authorization\": f\"token {TOKEN}\"})\n    for commit in r.json():\n        commit = commit[\"commit\"]\n        if not commit[\"message\"].startswith(\"Merge pull request #\"):\n            author = commit[\"author\"][\"email\"]\n            repo_counter[author] += 1\n    return repo_counter\n\n\ndef count_repo(repo):\n    commit_request = requests.get(commit_query.format(OWNER, repo, 1),\n                                  headers={\"Authorization\": f\"token {TOKEN}\"})\n    commit_headers = commit_request.headers\n    if \"Link\" not in commit_headers:\n        repo_counter = Counter()\n        for commit in commit_request.json():\n            if \"commit\" not in commit:\n                break\n            commit = commit[\"commit\"]\n            if not commit[\"message\"].startswith(\"Merge pull 
request #\"):\n author = commit[\"author\"][\"email\"]\n repo_counter[author] += 1\n return repo_counter\n else:\n last_commit_page = re.findall(PATTERN, commit_headers['Link'])[1]\n commit_pages_nb = int(urlparse(last_commit_page).query.split('=')[-1])\n commit_tasks = []\n c = Counter()\n with ThreadPoolExecutor(max_workers=16) as tpe:\n for i in range(1, commit_pages_nb + 1):\n q = commit_query.format(OWNER, repo, i)\n commit_tasks.append(tpe.submit(count_commits, q))\n for commit_task in as_completed(commit_tasks):\n c += commit_task.result()\n return c\n\n\ndef get_repo_list():\n query = \"https://api.github.com/users/{}/repos?page={}\"\n r = requests.get(query.format(OWNER, 1), headers={\n \"Authorization\": f\"token {TOKEN}\"})\n repositories = []\n headers = r.headers\n\n last_page = re.findall(PATTERN, headers['Link'])[1]\n pages_nb = int(urlparse(last_page).query.split('=')[1])\n for i in range(1, pages_nb + 1):\n q = query.format(OWNER, i)\n r = requests.get(q, headers={\"Authorization\": f\"token {TOKEN}\"})\n for repo in r.json():\n repositories.append(repo[\"name\"])\n # print(repo[\"name\"])\n return repositories\n\n\ndef get_stats():\n commits_number = sum(authors.values())\n top100_commit_values = [val for name, val in top100]\n top100_commit_sum = sum(top100_commit_values)\n names = [name for name, val in top100]\n names.append('other')\n top100_commit_values.append(commits_number - top100_commit_sum)\n fig = go.Figure(data=[go.Pie(labels=names,\n title=f\"TOP-100 committers in {OWNER}\",\n values=top100_commit_values,\n showlegend=False, textinfo=\"none\")])\n fig.show()\n\n\nif __name__ == '__main__':\n # start = time.time()\n authors = Counter()\n tasks = []\n with ProcessPoolExecutor(max_workers=16) as ppe:\n for repository in get_repo_list():\n tasks.append(ppe.submit(count_repo, repository))\n\n for task in as_completed(tasks):\n authors += task.result()\n top100 = authors.most_common(100)\n print(dict(top100))\n # end = time.time()\n # print(end - start)\n get_stats()\n","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"374440961","text":"#######################################################################\n# Class to calculate EM cascade Radiation\n\n# References:\n# 1) http://iopscience.iop.org/article/10.1088/0004-637X/768/1/54/pdf\n\n# 25 July 2017\n# Author(s) : Wrijupan Bhattacharyya (wrijupan.bhattacharyya@desy.de)\n########################################################################\nfrom optical_depth import CalcOptDepthBB, CalcOptDepthPWL\nfrom trial_pgamma_model import PionDecay_gamma, \\\n PionDecay_positron, PionDecay_electron\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.constants import m_e, e, c, sigma_T, hbar\nfrom naima.models import PowerLaw as PL\nfrom naima.utils import trapz_loglog\nfrom scipy.special import gamma, cbrt\nimport matplotlib.pyplot as plt\nimport timeit\n\n_mec2_u = (m_e * c ** 2).to('eV')\n_c = c.cgs\n_sigmaT = sigma_T.cgs\n_Bcrit = 4.4e13 * u.G\n_norm = 1.5e21 * u.Unit('eV-3 cm-3')\n\nclass EMCascade(object):\n def __init__(self, particle_dist, gammamin, gammamax, gammagridsize,\n B, R = 1 * u.kpc, T = 2.7 * u.K, eta=3):\n self.B = B.to('G')\n self.R = R.to('cm')\n self.T = T.to('K')\n self.pdist = particle_dist\n self.gmin = gammamin\n self.gmax = gammamax\n self.bins = gammagridsize\n self.eta = eta\n\n def Ngam_dot(self, Eph):\n norm = 1.5e21 * u.Unit('erg-3 
cm-3')\n norm = norm.to('eV-3 cm-3')\n return PionDecay_gamma(self.pdist, T=1e4 * u.K,\n norm=norm)._calc_spec_pgamma(Eph)\n\n def Qe(self, Epair):\n \"\"\"\n Parameters\n ----------\n Epair : astropy Quantity (float)\n only one energy of the pairs\n Returns\n --------\n Only one particle injection flux at energy = Epair\n \"\"\"\n norm = 1.5e21 * u.Unit('erg-3 cm-3')\n norm = norm.to('eV-3 cm-3')\n return PionDecay_positron(self.pdist,T=1e4 * u.K,\n norm=norm)._calc_spec_pgamma(Epair) + \\\n PionDecay_electron(self.pdist,T=1e4 * u.K,\n norm=norm)._calc_spec_pgamma(Epair)\n\n def Ne(self, Epair):\n \"\"\"\n Parameters\n ----------\n Epair : astropy Quantity (array_like)\n array of pair energies\n Returns\n -------\n array of particle distribution for Epair array\n \"\"\"\n norm = 1.5e21 * u.Unit('erg-3 cm-3')\n norm = norm.to('eV-3 cm-3')\n\n return (PionDecay_positron(self.pdist,T=1e4 * u.K,\n norm=norm)._spectrum(Epair) +\n PionDecay_electron(self.pdist,T=1e4 * u.K,\n norm=norm)._spectrum(Epair)) * \\\n (self.eta * (self.R / _c)).value\n\n def attenuation_BB(self, E):\n \"\"\"\n This method is not used at present.\n Optical depth calculated assuming\n a BPL target. See below\n \"\"\"\n calcdepth = CalcOptDepthBB(self.R, 2.7 * u.K, 1.5e62 * u.Unit('cm-3 eV-3'))\n E = E.to('eV') / _mec2_u\n tau = calcdepth.calc_opt_depth(E)\n return (1 - np.exp(- tau)) / tau, tau\n\n def tot_attenuation(self, Eph):\n \"\"\"\n Allows for array operation\n \"\"\"\n expTau = []\n Tau = []\n for E in Eph:\n tau = self.attenuation(E)\n expTau.append(tau[0])\n Tau.append(tau[1])\n return expTau, Tau\n\n def attenuation(self, E):\n taupl = CalcOptDepthPWL()\n norm = 1e-10 * u.Unit('erg cm-2 s-1')\n norm = norm.to('eV cm-2 s-1')\n tau = float(taupl.tau_YY(E, 1, 1e-5, norm, 1e5 * u.s, 10, 2, -2))\n return (1 - np.exp(- tau)) / tau, tau\n\n\n def Nsync_dot(self, Eph, Ne, Eemin, Eemax):\n \"\"\"\n Synchrotron emissivity calulated according to Reference.\n\n With this emissivity, the synch spectrum cuts of at higher\n energies compared to Aharonian's emissivity (synch cuts off\n ~3 orders of magnitude below Eemax whereas with Aharonian's\n emissivity, the synch spectrum cuts off ~9 orders of mag below).\n \"\"\"\n gmin = Eemin.to('eV') / _mec2_u\n gmax = Eemax.to('eV') / _mec2_u\n\n def gam_arr():\n log10gmin = np.log10(gmin)\n log10gmax = np.log10(gmax)\n return np.logspace(log10gmin, log10gmax, len(Ne))\n\n eps = Eph.to('eV') / _mec2_u\n b = self.B / _Bcrit\n emissivity = (gam_arr() ** - (2./3)) * np.exp(- eps / (b * gam_arr()**2))\n\n num = _c * _sigmaT * self.B ** 2\n denom = 6 * np.pi * _mec2_u * gamma(4. / 3) * b ** (4. / 3)\n A0 = num / denom\n norm = A0.value * (eps ** (-2. / 3))\n\n spec = norm * trapz_loglog(_mec2_u * Ne * emissivity, gam_arr(), axis=0)\n return spec.value\n\n def Nsync_dot_ah(self, Eph, Ne, Eemin, Eemax):\n \"\"\"\n Synchrotron emissivity according to Aharonian, Kelner, and Prosekin\n 2010, PhysRev D 82, 3002\n (`arXiv:1006.1045 `_)\n\n If this emissivity is used the synchrotron spectrum cuts off at\n much lower energies (~ 9-10 orders of magnitude below elec. energy).\n \"\"\"\n Eph = Eph.to('eV')\n gmin = Eemin.to('eV') / _mec2_u\n gmax = Eemax.to('eV') / _mec2_u\n\n def gam_arr():\n log10gmin = np.log10(gmin)\n log10gmax = np.log10(gmax)\n return np.logspace(log10gmin, log10gmax, len(Ne))\n\n def Gtilde(x):\n cb = x ** (1./3)\n gt1 = 1.808 * cb / np.sqrt(1 + 3.4 * cb ** 2.)\n gt2 = 1 + 2.210 * cb ** 2. + 0.347 * cb ** 4.\n gt3 = 1 + 1.353 * cb ** 2. 
+ 0.217 * cb ** 4.\n return gt1 * (gt2 / gt3) * np.exp(-x)\n\n CS1_0 = np.sqrt(3) * e.value ** 3 * self.B.to('G').value\n CS1_1 = (2 * np.pi * m_e.cgs.value * c.cgs.value\n ** 2 * hbar.cgs.value * Eph.to('erg').value)\n CS1 = CS1_0 / CS1_1\n\n Ec = 3 * e.value * hbar.cgs.value * \\\n self.B.to('G').value * gam_arr() ** 2\n Ec /= 2 * (m_e * c).cgs.value\n\n EgEc = Eph.to('erg').value / Ec\n dNdE = CS1 * Gtilde(EgEc)\n\n spec = trapz_loglog(Ne * dNdE, gam_arr(), axis=0) /u.s /u.erg\n spec = (spec.to('s-1 eV-1')).value\n print(spec)\n return spec\n\n\n def Ngg_dot(self, Ee, Eemin, Eemax, Ne):\n eps1 = Ee / 0.9\n eps2 = Ee / 0.1\n\n def fabs(eps):\n return 1 - self.attenuation(eps)[0]\n\n N01_dot = self.Ngam_dot(eps1)\n N02_dot = self.Ngam_dot(eps2)\n Nsync1_dot = self.Nsync_dot(eps1, Ne, Eemin, Eemax)\n Nsync2_dot = self.Nsync_dot(eps2, Ne, Eemin, Eemax)\n\n term1 = fabs(eps1) * (N01_dot + Nsync1_dot)\n term2 = fabs(eps2) * (N02_dot + Nsync2_dot)\n return term1 + term2\n\n\n def gamma_grid(self, gammin, gammax, gambins):\n gamma_grid = []\n for i in range(-1, gambins + 2):\n gamma_grid.append(gammin * (gammax / gammin) ** ((i - 1) / (gambins - 1)))\n gamma_grid_ext = np.array(gamma_grid)\n gamma_grid = gamma_grid_ext[1:-1]\n energy_grid = gamma_grid * _mec2_u\n\n gamma_mid = (gamma_grid_ext[1:] * gamma_grid_ext[:-1]) ** 0.5\n delta_gamma = gamma_mid[1:] - gamma_mid[:-1]\n return gamma_grid, energy_grid, gamma_mid, delta_gamma\n\n def tridiag_mat(self, Ne, gammin, gammax, gambins):\n N = len(self.gamma_grid(gammin, gammax, gambins)[3])\n matrix_lhs = np.zeros((N, N), float)\n matrix_rhs = np.zeros(shape=N)\n B = self.B\n\n def cool_rate(gamm):\n num = _c * _sigmaT * B **2\n denom = 6 * np.pi * _mec2_u\n return (-(num / denom) * (gamm ** 2)).value\n\n gam, ene, gam_mid, del_gam = self.gamma_grid(gammin, gammax, gambins)\n tesc = (self.eta * (self.R / _c)).value\n\n for i in range(N-1, -1, -1):\n gamma_minus_half = gam_mid[i]\n gamma_plus_half = gamma_minus_half + del_gam[i]\n\n V3 = cool_rate(gamma_plus_half) / del_gam[i]\n V2 = (1 / tesc) - (cool_rate(gamma_minus_half) / del_gam[i])\n for j in range(N):\n if j == i :\n matrix_lhs[i, j] = V2\n elif j == i + 1 :\n matrix_lhs[i, j] = V3\n\n matrix_rhs[i] = self.Qe(ene[i]) + self.Ngg_dot(ene[i],\n ene[-1], 8e17 * u.eV, Ne=Ne)\n return matrix_lhs, matrix_rhs\n\n def Ne_soln(self, Ne, gammin, gammax, gambins):\n lhs, rhs = self.tridiag_mat(Ne, gammin, gammax, gambins)\n Ne_new = np.linalg.solve(lhs, rhs)\n print(\"\\n LHS =\", lhs)\n print(\"\\n RHS =\", rhs)\n print(\"\\n Solution = {} \\n\".format(Ne_new))\n return Ne_new\n\n @property\n def elec_spectrum(self):\n gam, ene, gam_mid, del_gam = self.gamma_grid(self.gmin, self.gmax, self.bins)\n splitted_gam = np.array_split(gam, len(gam) / 6)\n print(\"\\n CALCULATING INITIAL ELECTRON INJECTION ...\")\n Ne = self.Ne(splitted_gam[-1] * _mec2_u)\n iter = 1\n\n for gam in reversed(splitted_gam):\n print(\"\\n CALCULATING ELECTRON DISTRIBUTION FOR CASCADE GENERATION {}\".format(iter))\n print(\"\\n Starting Electron distribution : {}\".format(Ne))\n gammin, gammax, gambins = gam[0], gam[-1], len(gam)\n Ne_new = self.Ne_soln(Ne, gammin, gammax, gambins)\n Ne = Ne_new\n iter += 1\n print(\"\\n FINAL ELECTRON DISTRIBUTION AFTER N GENERATIONS : {}\".format(Ne))\n return Ne\n\n @property\n def photon_spectrum(self):\n Ne = self.elec_spectrum\n Eph = np.linspace(1e9 * u.eV, 1e12 * u.eV, 1e2)\n\n # IF Aharonian's parametrization for emissivity is used\n #the synchrotron spectrum cuts off at much lower 
energies\n        # Eph = np.linspace(2 * u.eV, 1e5 * u.eV, 100)\n\n        photon_flux = []\n        for E in Eph:\n            # Assumption : Only the high energy e- contributes to Synchrotron\n            photon_flux.append(self.Nsync_dot(E, Ne, 8e12 * u.eV, 1e17 * u.eV))\n        photon_flux = np.array(photon_flux)\n        return Eph, photon_flux\n\n    @property\n    def esc_photon_spectrum(self):\n        Eph, photon_flux = self.photon_spectrum\n        exptau, tau = self.tot_attenuation(Eph)\n        return Eph, tau, photon_flux, photon_flux * exptau\n\n    @property\n    def plot_photon_spectrum(self):\n        #Eph, Nph = self.photon_spectrum\n        Eph, tau, Nph, esc_Nph = self.esc_photon_spectrum\n        esc_Nph_sed = ((Eph ** 2) * esc_Nph).value\n        Nph_sed = ((Eph ** 2) * Nph).value\n\n        fig, ax1 = plt.subplots()\n        ax1.loglog(Eph.to('eV'), esc_Nph_sed, 'b-', label='escaping photons')\n        ax1.loglog(Eph.to('eV'), Nph_sed, 'r-', label='last gen photons before esc')\n        ax1.set_ylabel(r'$E^{2}$ $\\times$ dN/dE [eV $s^{-1}]$')\n        ax1.set_xlabel('Energy [eV]')\n        ax1.set_title(\"Cascade spectrum\")\n        fig.tight_layout()\n        plt.legend(loc='best')\n        plt.savefig(\"./_test_cascade_sync_spec.png\")\n        plt.show()\n\n\nif __name__ == '__main__':\n    start = timeit.default_timer()\n    pdist = PL(4.3e8 / u.eV, 1e3 * u.GeV, 2.5)\n    emc = EMCascade(pdist, 1e8, 3e10, 11, 1e-4 * u.G, 1 * u.kpc, 1e4 * u.K)\n\n    emc.plot_photon_spectrum\n    stop = timeit.default_timer()\n    print(\"Elapsed time for computation = {} secs\".format(stop - start))\n\n\n\n\n\n\n\n\n","sub_path":"cascade.py","file_name":"cascade.py","file_ext":"py","file_size_in_byte":11461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159584352","text":"# -*- coding:UTF-8 -*-\n\nimport tensorflow as tf\n\ndef count_param(): # count the total number of network parameters\n    total_parameters = 0\n    for v in tf.trainable_variables():\n        shape = v.get_shape()\n        variable_parameters = 1\n        for dim in shape:\n            variable_parameters *= dim.value\n        total_parameters += variable_parameters\n    print('Total number of network parameters:', total_parameters)\n\ndef count_flops(graph):\n    \"\"\"Print the FLOPs of the network.\n    graph =tf.get_default_graph()\n    Args:\n        graph ([graph]): the model graph; note that it must be passed in after sess.run(tf.global_variables_initializer())\n    \"\"\"\n    flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())\n    print('FLOPs: {}'.format(flops.total_float_ops))\n\n\ndef stats_graph(graph):\n    \"\"\"Print the parameter count and FLOPs.\n\n    Args:\n        graph ([type]): [description]\n    \"\"\"\n    flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())\n    params = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())\n    print('\\n FLOPs: {}; Trainable params: {} \\n'.format(flops.total_float_ops, params.total_parameters))\n","sub_path":"deepv2d/utils/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487946926","text":"import numpy as np\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nimport json\n\nDATASET_PATH = \"../../dataset/LITS/Training Batch 1/\"\nTrainData = []\nLabelData = []\nfor fi in range(3):\n    img_path = DATASET_PATH + 'volume-'+str(fi)+'.nii'\n    lab_path = DATASET_PATH + 'segmentation-'+str(fi)+'.nii'\n\n    img = nib.load(img_path).get_data()\n    lab = nib.load(lab_path).get_data()\n\n    print(img.shape)\n    print(lab.shape)\n\n    img_3d_max = np.amax(img)\n    lab_3d_max = np.amax(lab)\n\n    img = img / img_3d_max * 255 # normalize the pixel values into the 0-255 range; this is the 3-D volume\n    lab = lab / lab_3d_max * 255\n\n\n    #for i in 
range(img.shape[2]-40, img.shape[2]): # 对切片进行循环\n # img_2d = img[:, :, i] # 取出一张图像\n # TrainData.append({'g': img_2d.tolist()})\n # lab_2d = lab[:, :, i]\n #LabelData.append({'g': lab_2d.tolist()})\n\n #if fi == 0 and i == img.shape[2] // 2:\n #fig = plt.imshow(img_2d)\n\n img = []\n lab = []","sub_path":"code/util/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"530718455","text":"import cv2\nimport numpy as np\nfrom dataPath import DATA_PATH\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rcParams['figure.figsize'] = (6.0, 6.0)\nmatplotlib.rcParams['image.cmap'] = 'gray'\n\nimageName = DATA_PATH + \"images/dilation_example.jpg\"\n\n# Read the input image\nimage = cv2.imread(imageName)\n\n# Check for an invalid input\nif image is None:\n print(\"Could not open or find the image\")\ncv2.imshow(\"image\",image)\ncv2.waitKey(0)\n\n# Get structuring element/kernel which will be used for dilation\nkSize = (7,7)\nkernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kSize)\ncv2.imshow(\"image\",kernel1*255)\ncv2.waitKey(0)\n\n# Apply dilate function on the input image\nimageDilated = cv2.dilate(image, kernel1)\n\nplt.figure(figsize=[15,15])\nplt.subplot(121);plt.imshow(image);plt.title(\"Original Image\")\nplt.subplot(122);plt.imshow(imageDilated);plt.title(\"Dilated Image\");\nplt.show()\n\n# Get structuring element/kernel which will be used for dilation\nkSize = (3,3)\nkernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kSize)\ncv2.imshow(\"image\",255*kernel2)\ncv2.waitKey(0)\n\n# Apply dilate function on the input image\nimageDilated1 = cv2.dilate(image, kernel2, iterations=1)\nimageDilated2 = cv2.dilate(image, kernel2, iterations=2)\n\nplt.figure(figsize=[20,20])\nplt.subplot(131);plt.imshow(image);plt.title(\"Original Image\")\nplt.subplot(132);plt.imshow(imageDilated1);plt.title(\"Dilated Image Iteration 1\");\nplt.subplot(133);plt.imshow(imageDilated2);plt.title(\"Dilated Image Iteration 2\");\nplt.show()\n\n# Image taken as input\nimageName = DATA_PATH + \"images/erosion_example.jpg\"\nimage = cv2.imread(imageName, cv2.IMREAD_COLOR)\n# Check for invalid input\nif image is None:\n print(\"Could not open or find the image\")\ncv2.imshow(\"image\",image)\ncv2.waitKey(0)\n\n# Eroding the image , decreases brightness of image\nimageEroded = cv2.erode(image, kernel1)\n\nplt.figure(figsize=[15,15])\nplt.subplot(121);plt.imshow(image);plt.title(\"Original Image\")\nplt.subplot(122);plt.imshow(imageEroded);plt.title(\"Eroded Image\");\nplt.show()\n\n\n","sub_path":"week3-python/erosionDilation.py","file_name":"erosionDilation.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"79695409","text":"import struct\nfrom struct import pack_into, unpack_from\nfrom typing import Optional\n\n\nclass BufferReadError(ValueError):\n pass\n\n\nclass BufferWriteError(ValueError):\n pass\n\n\nclass Buffer:\n def __init__(self, capacity: Optional[int] = 0, data: Optional[bytes] = None):\n if data is not None:\n self._data = bytearray(data)\n self._length = len(data)\n else:\n self._data = bytearray(capacity)\n self._length = capacity\n self._pos = 0\n\n @property\n def capacity(self) -> int:\n return self._length\n\n @property\n def data(self) -> bytes:\n return bytes(self._data[: self._pos])\n\n def data_slice(self, start: int, end: int) -> bytes:\n return 
bytes(self._data[start:end])\n\n def eof(self) -> bool:\n return self._pos == self._length\n\n def seek(self, pos: int) -> None:\n assert pos <= self._length\n self._pos = pos\n\n def tell(self) -> int:\n return self._pos\n\n\n# BYTES\n\n\ndef pull_bytes(buf: Buffer, length: int) -> bytes:\n \"\"\"\n Pull bytes.\n \"\"\"\n if buf._pos + length > buf._length:\n raise BufferReadError\n v = buf._data[buf._pos : buf._pos + length]\n buf._pos += length\n return bytes(v)\n\n\ndef push_bytes(buf: Buffer, v: bytes) -> None:\n \"\"\"\n Push bytes.\n \"\"\"\n length = len(v)\n if buf._pos + length > buf._length:\n raise BufferWriteError\n buf._data[buf._pos : buf._pos + length] = v\n buf._pos += length\n\n\n# INTEGERS\n\n\ndef pull_uint8(buf: Buffer) -> int:\n \"\"\"\n Pull an 8-bit unsigned integer.\n \"\"\"\n try:\n v = buf._data[buf._pos]\n buf._pos += 1\n return v\n except IndexError:\n raise BufferReadError\n\n\ndef push_uint8(buf: Buffer, v: int) -> None:\n \"\"\"\n Push an 8-bit unsigned integer.\n \"\"\"\n buf._data[buf._pos] = v\n buf._pos += 1\n\n\ndef pull_uint16(buf: Buffer) -> int:\n \"\"\"\n Pull a 16-bit unsigned integer.\n \"\"\"\n try:\n v, = struct.unpack_from(\"!H\", buf._data, buf._pos)\n buf._pos += 2\n return v\n except struct.error:\n raise BufferReadError\n\n\ndef push_uint16(buf: Buffer, v: int) -> None:\n \"\"\"\n Push a 16-bit unsigned integer.\n \"\"\"\n pack_into(\"!H\", buf._data, buf._pos, v)\n buf._pos += 2\n\n\ndef pull_uint32(buf: Buffer) -> int:\n \"\"\"\n Pull a 32-bit unsigned integer.\n \"\"\"\n try:\n v, = struct.unpack_from(\"!L\", buf._data, buf._pos)\n buf._pos += 4\n return v\n except struct.error:\n raise BufferReadError\n\n\ndef push_uint32(buf: Buffer, v: int) -> None:\n \"\"\"\n Push a 32-bit unsigned integer.\n \"\"\"\n pack_into(\"!L\", buf._data, buf._pos, v)\n buf._pos += 4\n\n\ndef pull_uint64(buf: Buffer) -> int:\n \"\"\"\n Pull a 64-bit unsigned integer.\n \"\"\"\n try:\n v, = unpack_from(\"!Q\", buf._data, buf._pos)\n buf._pos += 8\n return v\n except struct.error:\n raise BufferReadError\n\n\ndef push_uint64(buf: Buffer, v: int) -> None:\n \"\"\"\n Push a 64-bit unsigned integer.\n \"\"\"\n pack_into(\"!Q\", buf._data, buf._pos, v)\n buf._pos += 8\n\n\ndef pull_uint_var(buf: Buffer) -> int:\n \"\"\"\n Pull a QUIC variable-length unsigned integer.\n \"\"\"\n try:\n kind = buf._data[buf._pos] // 64\n if kind == 0:\n value = buf._data[buf._pos]\n buf._pos += 1\n return value\n elif kind == 1:\n value, = unpack_from(\"!H\", buf._data, buf._pos)\n buf._pos += 2\n return value & 0x3FFF\n elif kind == 2:\n value, = unpack_from(\"!L\", buf._data, buf._pos)\n buf._pos += 4\n return value & 0x3FFFFFFF\n else:\n value, = unpack_from(\"!Q\", buf._data, buf._pos)\n buf._pos += 8\n return value & 0x3FFFFFFFFFFFFFFF\n except (IndexError, struct.error):\n raise BufferReadError\n\n\ndef push_uint_var(buf: Buffer, value: int) -> None:\n \"\"\"\n Push a QUIC variable-length unsigned integer.\n \"\"\"\n if value <= 0x3F:\n buf._data[buf._pos] = value\n buf._pos += 1\n elif value <= 0x3FFF:\n pack_into(\"!H\", buf._data, buf._pos, value | 0x4000)\n buf._pos += 2\n elif value <= 0x3FFFFFFF:\n pack_into(\"!L\", buf._data, buf._pos, value | 0x80000000)\n buf._pos += 4\n elif value <= 0x3FFFFFFFFFFFFFFF:\n pack_into(\"!Q\", buf._data, buf._pos, value | 0xC000000000000000)\n buf._pos += 8\n else:\n raise ValueError(\"Integer is too big for a variable-length integer\")\n\n\ndef size_uint_var(value: int) -> int:\n \"\"\"\n Returns the number of bytes required to encode 
the given value\n as a QUIC variable-length unsigned integer.\n \"\"\"\n if value <= 0x3F:\n return 1\n elif value <= 0x3FFF:\n return 2\n elif value <= 0x3FFFFFFF:\n return 4\n elif value <= 0x3FFFFFFFFFFFFFFF:\n return 8\n else:\n raise ValueError(\"Integer is too big for a variable-length integer\")\n","sub_path":"aioquic/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"604291428","text":"# Imports the cPickle module for the respond config\nimport cPickle\n\n# Basic dictionary creating process, spread out to save headaches\ngender = (0, \"(Undefined, I am a robot. However I like to think of myself as Female.) + DYNR(You?)\")\nloc = (0, \"(I live in the internet.) + DYNR(You?)\")\nage = (0, \"(I am cgirgoAge) + DYNR(You?)\")\nfeel = (1, \"((I'm feeling fine.), (Good, Good), (Excellent, thanks!)) + DYNR(You?)\")\nbored = (1, \"DYNR(Mmm, yes)\")\nhappening = (1, \"(Not much)\")\n\nincattitle = (\"GENDER\", \"LOC\", \"AGE\", \"FEEL\", \"BORED\", \"HAPPENING\")\nincatdata = (gender, loc, age, feel, bored, happening)\n\nincat = dict(zip(incattitle, incatdata))\nintext = {\"M/F\": \"GENDER\", \"age\": \"AGE\", \"how old\": \"AGE\", \"live?\": \"LOC\", \"You?\": \"DYNQ\", \"bored\": \"BORED\"}\noutq = {\"How are you?\": \"FEEL\", \"What's up?\": \"HAPPENING\", \"Gee, this is boring\": \"BORED\"}\n\ndict = {\"incat\": incat, \"intext\": intext, \"outq\": outq}\n\n# Saves the dictionary to the file with cPickle\nfile = open(\"cgirgo.resp\", 'w')\ncPickle.dump(dict, file)\nfile.close()\n","sub_path":"genresp.py","file_name":"genresp.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"352125577","text":"import os\ndir = 'C:\\\\xampp'\nitems = os.listdir(dir)\nprint('Danh sach thu muc :')\nfor item in items:\n path = os.path.join(dir,item)\n if not os.path.isfile(path):\n print(item)\nprint('Danh sach file :')\nfor item in items:\n path = os.path.join(dir,item)\n if os.path.isfile(path):\n print(item)","sub_path":"example/modul2_python/day13_datetime_sys_os/os.py","file_name":"os.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"16553346","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 12 12:18:28 2018\n\n@author: nsde\n\"\"\"\n\n#%%\nimport torch\nimport argparse, datetime\nfrom torchvision import transforms\n\nfrom unsuper.trainer import vae_trainer\nfrom unsuper.data.mnist_data_loader import mnist_data_loader\nfrom unsuper.data.perception_data_loader import perception_data_loader\nfrom unsuper.helper.utility import model_summary\nfrom unsuper.helper.encoder_decoder import get_encoder, get_decoder\nfrom unsuper.models import get_model\n\n#%%\ndef argparser():\n \"\"\" Argument parser for the main script \"\"\"\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # Model settings\n ms = parser.add_argument_group('Model settings')\n ms.add_argument('--model', type=str, default='vae', help='model to train')\n ms.add_argument('--ed_type', type=str, default='mlp', help='encoder/decoder type')\n ms.add_argument('--stn_type', type=str, default='affinediff', help='transformation type to use')\n ms.add_argument('--beta', type=float, default=16.0, help='beta value for beta-vae model')\n \n # Training settings\n 
ts = parser.add_argument_group('Training settings')\n ts.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\n ts.add_argument('--eval_epoch', type=int, default=1000, help='when to evaluate log(p(x))')\n ts.add_argument('--batch_size', type=int, default=1024, help='size of the batches')\n ts.add_argument('--warmup', type=int, default=100, help='number of warmup epochs for kl-terms')\n ts.add_argument('--lr', type=float, default=1e-3, help='learning rate for adam optimizer')\n \n # Hyper settings\n hp = parser.add_argument_group('Variational settings')\n hp.add_argument('--latent_dim', type=int, default=2, help='dimensionality of the latent space')\n hp.add_argument('--density', type=str, default='bernoulli', help='output density') \n hp.add_argument('--eq_samples', type=int, default=1, help='number of MC samples over the expectation over E_q(z|x)')\n hp.add_argument('--iw_samples', type=int, default=1, help='number of importance weighted samples')\n \n # Dataset settings\n ds = parser.add_argument_group('Dataset settings')\n ds.add_argument('--classes','--list', type=int, nargs='+', default=[0,1,2,3,4,5,6,7,8,9], help='classes to train on')\n ds.add_argument('--num_points', type=int, default=10000, help='number of points in each class')\n ds.add_argument('--logdir', type=str, default='beta_final16', help='where to store results')\n ds.add_argument('--dataset', type=str, default='mnist', help='dataset to use')\n \n # Parse and return\n args = parser.parse_args()\n return args\n\n#%%\nif __name__ == '__main__':\n # Input arguments\n args = argparser()\n \n # Logdir for results\n if args.logdir == '':\n logdir = 'res/' + args.model + '/' + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M')\n else:\n logdir = 'res/' + args.model + '/' + args.logdir\n \n # Load data\n print('Loading data')\n if args.dataset == 'mnist':\n transformations = transforms.Compose([ \n #transforms.RandomAffine(degrees=20, translate=(0.1,0.1)), \n transforms.ToTensor(), \n ])\n trainloader, testloader = mnist_data_loader(root='unsuper/data', \n transform=transformations,\n download=True,\n classes=args.classes,\n num_points=args.num_points,\n batch_size=args.batch_size)\n img_size = (1, 28, 28)\n elif args.dataset == 'perception':\n trainloader, testloader = perception_data_loader(root='unsuper/data', \n transform=None,\n download=True,\n classes=args.classes,\n num_points=args.num_points,\n batch_size=args.batch_size)\n img_size = (1, 400, 200)\n\n # Construct model\n model_class = get_model(args.model)\n model = model_class(input_shape = img_size,\n latent_dim = args.latent_dim, \n encoder = get_encoder(args.ed_type), \n decoder = get_decoder(args.ed_type), \n outputdensity = args.density,\n ST_type = args.stn_type)\n \n # Summary of model\n #model_summary(model)\n \n # Optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n \n # Train model\n Trainer = vae_trainer(img_size, model, optimizer)\n Trainer.fit(trainloader=trainloader, \n n_epochs=args.n_epochs, \n warmup=args.warmup, \n logdir=logdir,\n testloader=testloader,\n eq_samples=args.eq_samples, \n iw_samples=args.iw_samples,\n beta=args.beta,\n eval_epoch=args.eval_epoch)\n \n # Save model\n torch.save(model.state_dict(), logdir + '/trained_model.pt')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494162998","text":"#! 
/usr/bin/python3\n# count_ids.py\n# By Ike Clinton\n# Python script to count CAN frame IDs from a log file\n# Can logs are of the form:\n# (timestamp) interface ID#FFFFFFFFFFFFFFFF\n# Where timestamp is of the form: 0000000000.000000\n# Where interface is usually one of vcan0 or can0\n# Where frame ID is 3 hex chars: FFF\n# and the data length is up to 8 bytes in hex following a pound sign\n# Example:\n# (1436509053.650713) vcan0 19E#6FE1CB7DE2218456\nimport argparse\nfrom collections import Counter\nimport os.path\nimport sys\n\n# Override Argument parser to throw error and generate help message\n# if undefined args are passed\nclass MyParser(argparse.ArgumentParser):\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n# Create argparser object to add command line args and help option\nparser = MyParser(\n\tdescription = 'This program takes in a a CAN data log file as input and prints'\n\t+ ' the unique can IDs that it finds',\n\tepilog = '',\n\tadd_help = True)\n\n# Add a \"-i\" argument to receive a filename\nparser.add_argument(\"-i\", action = \"store\", dest=\"file\",\n\t\t\t\t\thelp = \"log file to read in\")\n\n# Print help if no args are supplied\nif len(sys.argv)==1:\n\tparser.print_help()\n\tsys.exit(1)\n\n# Split and process arguments into \"args\"\nargs = parser.parse_args()\n\n# Check that filename is not blank\nif args.file == '':\n\tprint(\"ERROR: File name must not be blank\")\n\tparser.print_help()\n\tsys.exit(1)\n\n# Make sure file exists\nif os.path.isfile(args.file) == False:\n\tprint(\"ERROR: File does not exist\")\n\tparser.print_help()\n\tsys.exit(1)\n\n# Open the file as read only, read the lines into text\nwith open(args.file, 'r') as myfile:\n\ttext = myfile.readlines()\n\n# Strip newlines, and split on spaces\n# Select the first 3 characters (the frame ids) and count them\n# Finally, use most_common() to display them decreasing order\nc = Counter(l.strip().split()[2][0:3] for l in text[0:len(text)-1])\nfor x in c.most_common():\n\tprint(x)\n","sub_path":"count_ids.py","file_name":"count_ids.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607672842","text":"from functools import wraps\n\nfrom .exceptions import ClientError\nfrom .models import Chatroom\n#Error catching utility file\ndef catch_client_error(func):\n\n @wraps(func)\n def inner(message, *args, **kwargs):\n try:\n return func(message, *args, **kwargs)\n except ClientError as e:\n e.send_to(message.reply_channel)\n return inner\n \ndef get_room_or_error(room_id, user):\n #Checks if user is logged in\n if not user.is_authenticated():\n raise ClientError(\"USER_HAS_TO_LOGIN\")\n #Find the room requested by ID\n try:\n room = Chatroom.objects.get(pk=room_id)\n except Chatroom.DoesNotExist:\n raise ClientError(\"ROOM_INVALID\")\n #Check permissions if needed, possibly for secret admin chats\n #Maybe add some other time\n \n \n return room\n","sub_path":"teamwork/apps/chat/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"321307969","text":"#!/usr/bin/env python36\n\"\"\"\n\"\"\"\nimport sys\n\nimport argparse\n\n# import stage_check\n# - works if importing stage_check/stage_check.py outside\n# the context of a wheel package names stage_check\n# from stage_check import stage_check\n# - works if importing 
stage_check/stage_check.py from\n# the context of a wheel package names stage_check,\n# but not from outside..\n# \n# renaming stage_check.py -> test_framwork.py works in\n# both contexts...\n#\ntry:\n from stage_check import stage_check\nexcept ImportError:\n import stage_check\n\ndef parse_command_args():\n parser = argparse.ArgumentParser(description='Run Stage Tests')\n\n parser.add_argument('-r', '--router',\n default='',\n help='Effective router name')\n\n parser.add_argument('-n', '--node',\n default='',\n help='Effective node name')\n\n parser.add_argument('-p', '--primary-regex',\n default='^.*?A$',\n help='Regex to identify primary node')\n\n parser.add_argument('-s', '--secondary-regex',\n default='^.*?B$',\n help='Regex to identify secondary node')\n\n parser.add_argument('--site-regex',\n default='([0-9]+)',\n help='Regex to extract a router/node site number; '\n 'site number must be a regex group -- enclosed by ()s')\n\n parser.add_argument('--pod-regex',\n default='[pP]([0-9]+)$',\n help='Regex to extract a router/node pod number; '\n 'pod number must be a regex group -- enclosed by ()s')\n\n parser.add_argument('-v', '--version', action='store_true',\n default=False, help='Print version Information')\n\n parser.add_argument('-d', '--debug', action='store_true',\n default=False, help='Outputs debug information')\n\n parser.add_argument('-e', '--exclude', nargs='+',\n default=[], help='Excludes one or more tests')\n\n parser.add_argument('--context', action='store_true',\n default=False, help='Outputs router context(s)')\n\n parser.add_argument('--warn-is-fail', action='store_true',\n default=False, help='Treat warning as failure')\n\n parser.add_argument('-R', '--router-patterns', nargs='+',\n default=[], help='Conductor only: router substring matching patterns')\n\n parser.add_argument('--regex-patterns', action='store_true',\n default=False, help='Conductor only: match router patterns with regex')\n\n parser.add_argument('--start-after',\n default=None,\n help='Conductor only: Skip routers matching pattern until after this substring match')\n\n parser.add_argument('-o', '--output',\n default='',\n help='Send output to filename provided')\n\n parser.add_argument('-c', '--config-path', \n default=None, \n help='Path for configuration file')\n\n parser.add_argument('-g', '--generic', action='store_true',\n default=False, help='Use the generic config provided in the stage_check pex file.' 
+ \n 'If the --config-path (-c) option is also provided, it will be preferred' )\n\n parser.add_argument('-j', '--json', action='store_true',\n default=False, help='Override configuration to output as JSON') \n \n return parser.parse_args()\n\n############################\n#\n#\n# Main Section\n#\n############################\ndef main():\n args = parse_command_args()\n te = stage_check.TestExecutor(args)\n te.test_routers()\n return te.tests_shell_status()\n\nif __name__ == '__main__':\n status = main()\n sys.exit(status)\n","sub_path":"stage_check/stage_check/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"517014142","text":"\"\"\"Take a directory of JPEG images and register them for all HT_PROBE samples, cycling through the images in the directory.\"\"\"\n\nfrom ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria\nimport os\n\n\ndef process(tr):\n search_service = tr.getSearchService()\n sc = SearchCriteria()\n sc.addMatchClause(SearchCriteria.MatchClause.createAttributeMatch(SearchCriteria.MatchClauseAttribute.TYPE, '5HT_PROBE'))\n five_ht_samps = search_service.searchForSamples(sc)\n five_ht_exp = get_or_create_experiment(tr)\n data_set = create_image_data_set(tr, five_ht_exp)\n\n incoming_folder = tr.getIncoming().getPath()\n incoming_filenames = os.listdir(incoming_folder)\n count = len(incoming_filenames)\n i = 0\n # match samples to images, cycling through the images\n for samp in five_ht_samps:\n samp = tr.makeSampleMutable(samp)\n samp.setExperiment(five_ht_exp)\n index = i % count\n i = i + 1\n add_image_to_folder(tr, samp, incoming_folder, incoming_filenames[index])\n\n tr.moveFile(incoming_folder, data_set, \"images/\")\n\ndef get_or_create_experiment(tr):\n exp = tr.getExperiment(\"/PROBES/PROBE/5HT-EXP\")\n if exp:\n return exp\n\n proj = tr.createNewProject(\"/PROBES/PROBE\")\n proj.setDescription(\"Project for speculative 5HT experiments\")\n\n exp = tr.createNewExperiment(\"/PROBES/PROBE/5HT-EXP\", \"5HT_EXP\")\n return exp\n\ndef create_image_data_set(tr, exp):\n ds = tr.createNewDataSet('5HT_IMAGE')\n ds.setExperiment(exp)\n return ds\n\ndef add_image_to_folder(tr, samp, folder, filename):\n new_filename = samp.getCode() + \".jpg\"\n linked_path = os.path.join(folder, new_filename)\n os.link(os.path.join(folder, filename), linked_path)\n","sub_path":"openbis_mobile/source/core-plugins/ipad-ui-demo/1/dss/drop-boxes/ipad-image/data-set-handler.py","file_name":"data-set-handler.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92675203","text":"#!/usr/bin/env python3\n\nimport pycurl\nimport json\nimport argparse\nfrom urllib.parse import urlencode\nfrom io import BytesIO\n\n#Curl POST function\ndef postData(url, data):\n buffer = BytesIO()\n c = pycurl.Curl()\n c.setopt(pycurl.URL, url)\n c.setopt(pycurl.HTTPHEADER, ['Content-Type: application/json'])\n c.setopt(pycurl.POST, 1)\n c.setopt(pycurl.POSTFIELDS, data)\n c.setopt(c.WRITEFUNCTION, buffer.write)\n c.perform()\n http = c.getinfo(pycurl.HTTP_CODE)\n c.close()\n return {'http':http, 'data':buffer.getvalue()}\n\n#recursive json object sorter\ndef order(obj):\n if isinstance(obj, dict):\n return sorted((k, order(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return sorted(order(x) for x in obj)\n else:\n return obj\n\n#default 
configuration\ncontrol_server = 'http://aoddev.agentsofnature.com'\ntest_server = 'http://backend.agentsofdiscovery.net'\napi = '/api/v3'\npath = '/mobile/users/'\njson_data = json.dumps({\"string\":\"hello\"})\noutfile = 'out.txt'\nautomate = False;\n\n#arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\",\"--control\",help=\"Control server url.\")\nparser.add_argument(\"-t\",\"--test\",help=\"Test server url.\")\nparser.add_argument(\"-a\",\"--api\",help=\"API version path. eg: '/api/v3'\")\nparser.add_argument(\"-p\",\"--path\",help=\"Path to send the POST to. eg: '/mobile/users/'\")\nparser.add_argument(\"-d\",\"--data\",help=\"JSON POST data to send.\")\nparser.add_argument(\"-i\",\"--input\",help=\"Testing JSON data to use for automated tests. Overrides previous arguments.\")\nparser.add_argument(\"-o\",\"--out\",help=\"Output filename.\")\n\nargs = parser.parse_args()\nif args.control is not None:\n control_server = args.control\nif args.test is not None:\n control_server = args.control\nif args.api is not None:\n api = args.api\nif args.path is not None:\n path = args.path\nif args.data is not None:\n json_data = json.dumps(open(args.data).read())\nif args.input is not None:\n automate = True\n with open(args.input) as data_file:\n test_data = json.load(data_file)\nif args.out is not None:\n outfile = args.out\n\nf = open(outfile, 'w')\n\n#do automated tests\nif automate:\n for test in test_data['tests']:\n #configure the test data\n control_url = test['control_server'] + test['api'] + test['path']\n test_url = test['test_server'] + test['api'] + test['path']\n json_data = json.dumps(test['data'])\n\n #POST to server and get response\n control_response = postData(control_url, json_data)\n test_response = postData(test_url,json_data)\n\n #compare response\n result = \"FAIL\"\n try:\n control_data = json.loads(control_response['data'].decode('iso-8859-1'))\n test_data = json.loads(test_response['data'].decode('iso-8859-1'))\n if order(control_data) == order(test_data):\n result = \"PASS\"\n except:\n pass\n\n #write report\n f.write(\"\\n------------------------------------\" + test['id'] + \"------------------------------------\\n\")\n f.write(\"\\n\" + result + \"\\n \\n\")\n f.write(\"CONTROL: \" + control_url + \"\\n\")\n f.write(\"RESPONSE: \" + str(control_response['http']) + \"\\n\")\n f.write(control_response['data'].decode('iso-8859-1'))\n f.write(\"\\n \\n\")\n f.write(\"TEST: \" + test_url + \"\\n\")\n f.write(\"RESPONSE: \" + str(test_response['http']) + \"\\n\")\n f.write(test_response['data'].decode('iso-8859-1'))\n f.write(\"\\n \\n\")\n f.write(\"POST data: \\n\")\n f.write(json_data)\n f.write(\"\\n\")\nelse:\n #set urls\n control_url = control_server + api + path\n test_url = test_server + api + path\n\n #post the data\n control_response = postData(control_url,json_data)\n test_response = postData(test_url,json_data)\n\n #write output file\n f.write(\"POST data: \\n\")\n f.write(json_data)\n f.write(\"\\n \\n\")\n f.write(\"CONTROL: \" + control_url + \"\\n\")\n f.write(\"RESPONSE: \\n\")\n f.write(control_response.decode('iso-8859-1'))\n f.write(\"\\n \\n\")\n f.write(\"TEST: \" + test_url + \"\\n\")\n f.write(\"RESPONSE: \\n\")\n f.write(test_response.decode('iso-8859-1'))\n f.write(\"\\n\")\n\nf.close()\n","sub_path":"testrig.py","file_name":"testrig.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287094509","text":"\"\"\"Compressed Finite 
Sparse Row Matrix\"\"\"\n\nimport numba as nb\nfrom numba import jitclass\nimport numpy as np\n\nfrom .types import MAX_BCSR, POINT_STORAGE_np, INDEX_STORAGE_np\nfrom .bcsr import bcsr_matrix\nfrom .matmul import finite_matmul_2d, finite_matmul_1d\n\n\ndef fcsr_matrix(arg1, shape=None):\n \"\"\"\n Can be instantiated in 2 ways:\n\n fcsr_constructor(bcsr_iterable)\n bcsr_iterable is a iterable of bcsr matrices\n\n fcsr_constructor(fcoo, shape)\n fcoo is a dictionary of binary coo form\n See coo_to_fcoo\n\n :return: FCSR\n \"\"\"\n if isinstance(arg1, (tuple, list)):\n if len(arg1) > 10:\n raise Exception(\"To Large of FCSR. Adjust MAX_BCSR in transform.py\")\n for b_matrix in arg1:\n if shape is None:\n shape = b_matrix.shape\n elif shape == b_matrix.shape:\n pass\n else:\n raise Exception(\"Shapes don't Match\")\n\n b_matrix_decomp = list(arg1)\n for empty_b_matrix in range(10-len(arg1)):\n b_matrix_decomp.append(bcsr_matrix(_row_p=np.array([], dtype=POINT_STORAGE_np),\n _col_i=np.array([], dtype=INDEX_STORAGE_np),\n _shape=shape))\n\n b_matrix_decomp = tuple(b_matrix_decomp)\n return fcsr_matrix(_bcsr_decomp=b_matrix_decomp, _shape=shape)\n\n elif isinstance(arg1, dict):\n if shape is None:\n raise Exception(\"Shape must be defined\")\n b_matrix_decomp = []\n for key in arg1.keys():\n temp_bcsr_matrix = bcsr_matrix(arg1[key][0],\n arg1[key][1],\n _shape=shape)\n temp_bcsr_matrix.alpha = key\n b_matrix_decomp.append(temp_bcsr_matrix)\n\n for empty_b_matrix in range(10-len(arg1)):\n b_matrix_decomp.append(bcsr_matrix(_row_p=np.array([], dtype=POINT_STORAGE_np),\n _col_i=np.array([], dtype=INDEX_STORAGE_np),\n _shape=shape))\n\n b_matrix_decomp = tuple(b_matrix_decomp)\n return fcsr_matrix(_bcsr_decomp=b_matrix_decomp, _shape=shape)\n else:\n raise NotImplementedError\n\n\nbcsr_type = nb.deferred_type()\nbcsr_type.define(bcsr_matrix.class_type.instance_type)\n\nfcsr_spec = [\n ('shape', nb.types.UniTuple(nb.int64, 2)),\n ('bcsr_decomp', nb.types.UniTuple(bcsr_type, MAX_BCSR)),\n]\n\n\n@jitclass(fcsr_spec)\nclass fcsr_matrix:\n \"\"\"\n Finite Compressed Row Matrix\n \"\"\"\n def __init__(self, _bcsr_decomp, _shape):\n \"\"\"\n \"\"\"\n\n self.shape = _shape\n self.bcsr_decomp = _bcsr_decomp\n\n @property\n def depth(self):\n return len(self.bcsr_decomp)\n\n def dot1d(self, other):\n \"\"\"\n\n :param other:\n :return:\n \"\"\"\n if len(other.shape) != 1:\n raise Exception('Must be a 1d Array')\n\n d1 = other.shape[0]\n m, n = self.shape\n\n if n != d1:\n raise Exception('Dimension MisMatch')\n\n return finite_matmul_1d(self.bcsr_decomp, other, m)\n\n def dot2d(self, other):\n \"\"\"\n\n :param other:\n :return:\n \"\"\"\n if len(other.shape) != 2:\n raise Exception('Must be a 2d Array')\n\n m, n = self.shape\n d1, k = other.shape\n\n # if k > 1 and other.flags.c_contiguous:\n # raise Exception(\"Use Fortran Array\")\n\n if n != d1:\n raise Exception('Dimension MisMatch')\n\n return finite_matmul_2d(self.bcsr_decomp, other, m, k)\n\n def to_array(self):\n array = np.zeros(self.shape)\n for sub_bcsr_matrix in self.bcsr_decomp:\n if not sub_bcsr_matrix.empty:\n array += sub_bcsr_matrix.to_array()\n return array\n\n @property\n def size(self):\n nnz = 0\n for sub_bcsr_matrix in self.bcsr_decomp:\n if not sub_bcsr_matrix.empty:\n nnz += sub_bcsr_matrix.size\n return nnz\n\n @property\n def sparsity(self):\n return self.size / (self.shape[0] * self.shape[1])\n\n @property\n def mem(self):\n return self.__sizeof__()\n\n def __sizeof__(self):\n \"\"\"\n returns roughly the memory storage of instance 
in Bytes\n\n Storage Includes:\n self.row_p\n self.col_i\n\n :return:\n \"\"\"\n mem = 0\n for sub_bcsr_matrix in self.bcsr_decomp:\n if not sub_bcsr_matrix.empty:\n mem += sub_bcsr_matrix.mem\n return mem\n","sub_path":"lmdec/array/sparse/finite/fcsr.py","file_name":"fcsr.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381504994","text":"'''\nProvides classes for loading StreamItem objects to/from kvlayer\n\nThis software is released under an MIT/X11 open source license.\n\nCopyright 2012-2013 Diffeo, Inc.\n'''\nfrom collections import deque\nimport os\nimport sys\nimport uuid\nimport kvlayer\nimport logging\nimport streamcorpus\n\nlogger = logging.getLogger(__name__)\n\nclass from_kvlayer(object):\n '''\n loads StreamItems from a kvlayer table based an i_str passed to\n __call__, where the i_str must be a four-tuple joined on commas\n that provides (epoch_ticks_1, doc_id_1, epoch_ticks_2, doc_id_2) \n\n If no values are specified, it defaults to the entire table. If\n one of the epoch_ticks is provided, then both must be provided.\n If one of the doc_ids is provided, then both must be provided.\n That is, half-open ranges are not supported.\n '''\n def __init__(self, config):\n self.config = config\n self.client = kvlayer.client(config)\n self.client.setup_namespace(\n dict(stream_items=2))\n\n def __call__(self, i_str):\n if i_str:\n epoch_ticks_1, doc_id_1, epoch_ticks_2, doc_id_2 = i_str.split(',')\n epoch_ticks_1 = uuid.UUID(int=int(epoch_ticks_1))\n epoch_ticks_2 = uuid.UUID(int=int(epoch_ticks_2))\n if doc_id_1:\n assert doc_id_2, (doc_id_1, doc_id_2)\n doc_id_1 = uuid.UUID(hex=doc_id_1)\n doc_id_2 = uuid.UUID(hex=doc_id_2)\n key1 = (epoch_ticks_1, doc_id_1)\n key2 = (epoch_ticks_2, doc_id_2)\n else:\n key1 = (epoch_ticks_1, )\n key2 = (epoch_ticks_2, )\n key_ranges = [(key1, key2)]\n else:\n key_ranges = []\n\n for key, data in self.client.scan( 'stream_items', *key_ranges ):\n errors, data = streamcorpus.decrypt_and_uncompress(data)\n yield streamcorpus.deserialize(data)\n\n\nclass to_kvlayer(object):\n '''\n stores StreamItems in a kvlayer table called \"stream_items\" in the\n namespace specified in the config dict for this stage.\n \n each StreamItem is serialized and xz compressed and stored on the\n key (UUID(int=epoch_ticks), UUID(hex=doc_id))\n\n This key structure supports time-range queries directly on the\n stream_items table.\n\n To search by doc_id, one can look up all epoch_ticks for a given\n doc_id using the index table stream_items_doc_id_epoch_ticks,\n which has the keys reversed and no data.\n '''\n def __init__(self, config):\n self.config = config\n self.client = kvlayer.client(config)\n self.client.setup_namespace(\n dict(stream_items=2,\n stream_items_doc_id_epoch_ticks=2))\n\n def __call__(self, t_path, name_info, i_str):\n inverted_keys = deque()\n def keys_and_values():\n total_mb = 0.\n for si in streamcorpus.Chunk(t_path):\n key1 = uuid.UUID(int=si.stream_time.epoch_ticks)\n key2 = uuid.UUID(hex=si.doc_id)\n data = streamcorpus.serialize(si)\n errors, data = streamcorpus.compress_and_encrypt(data)\n assert not errors, errors\n total_mb += float(len(data)) / 2**20\n logger.info('%r, %r --> %d, %.3f', key1, key2, len(data), total_mb)\n yield (key1, key2), data\n inverted_keys.append( ((key2, key1), r'') )\n\n self.client.put( 'stream_items', *keys_and_values())\n\n self.client.put( 'stream_items_doc_id_epoch_ticks', 
*inverted_keys)\n","sub_path":"src/streamcorpus_pipeline/_kvlayer.py","file_name":"_kvlayer.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135098979","text":"from basiques.cube import Cube\nimport random\n\nclass Mur(Cube):\n \"\"\"Classe héritant de la classe Cube, caractérisée par:\n -ses coordonnées: x, y, z\n -sa hauteur\n -sa largueur\n -sa longueur\"\"\"\n\n def __init__(self, x, y, z, larg, long, haut):\n \"\"\"Constructeur de la classe Mur\"\"\"\n Cube.__init__(self,x,y,z,larg,long,haut)\n\n def safficher(self):\n \"\"\"Methode d'affichage d'un mur au format :\n mur[x= , y= , z= , larg= , long= , haut= ]\n \"\"\"\n print(\"Mur(x=%.2f,y=%.2f,z=%.2f, larg=%.2f,long=%.2f,haut=%.2f)\"%(self.x, self.y, self.z, self.larg, self.long, self.haut))\n\t\t\t\ndef Creation_Mur(arene):\n \"\"\"Création d'un mur avec une hauteur et une epaisseur fixé par les limites de l'Arene\"\"\"\n\n x = random.randint(0, arene.lx)\n y = random.randint(0, arene.ly)\n z = 1 #un mur est posé au sol\n\n larg = random.randint(10, 50) #largeur arbitraire\n long = random.randint(20,100)\n haut = arene.lz-1 #un mur va jusq'au plafond \n \n return Mur(x, y, z, larg, long, haut)\n\n\n\n#creation d'u constructeur temporaire pour l'affichage tkinter\ndef Creation_Mur_xy(x, y, arene):\n \"\"\"Création d'un mur avec une hauteur et une epaisseur fixé par les limites de l'Arene\"\"\"\n\n x = x\n y = y\n z = 1 # un mur est posé au sol\n\n larg = random.randint(50, arene.lx)\n long = 20\n haut = 499 # un mur monte jusqu'au plafond\n\n return Mur(x, y, z, larg, long, haut)\n \n \n","sub_path":"simulation/basiques/mur.py","file_name":"mur.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"5969528","text":"import pylokimq\nimport base64\nimport subprocess\nimport shlex\nimport argparse\nimport io\nimport time\n\ndef decode_str(data):\n s = ''\n l = ''\n while True:\n ch = data.read(1)\n if ch == b':':\n return data.read(int(l))\n else:\n l += chr(ch)\n\ndef decode_list(data):\n l = []\n ch = data.read(1)\n assert ch == b'l'\n while True:\n ch = data.peek(1)\n if ch == b'e':\n data.read(1)\n return l\n l += decode_value(data)\n\ndef decode_dict(data):\n d = dict()\n ch = data.read(1)\n assert ch == b'd'\n while True:\n ch = data.peek(1)\n if ch == b'e':\n data.read(1)\n return d\n k = decode_str(data)\n v = decode_value(data)\n d[k] = v\n\ndef decode_int(data):\n ch = data.read(1)\n i = ''\n assert ch == b'i'\n while True:\n ch = data.read(1)\n if ch == b'e':\n return int(i)\n i += chr(ch)\n\ndef decode_value(data):\n ch = data.peek(1)\n if ch in b'0123456789':\n return decode_string(data)\n if ch == b'i':\n return decode_int(data)\n if ch == b'd':\n return decode_dict(data)\n if ch == b'l':\n return decode_list(data)\n raise Exception(\"invalid char: {}\".format(ch))\n\n\ndef decode_address(data):\n return '{}.loki'.format(pylokimq.base32z_encode(decode_dict(data)[b's'][b's']))\n\ndef handle_auth(args, cmd):\n cmd += decode_address(io.BytesIO(args[0]))\n cmd += base64.b64encode(args[1]).decode('ascii')\n result = subprocess.run(args=cmd, check=False)\n if result.returncode == 0:\n return \"OK\"\n else:\n return \"REJECT\"\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"--bind\", required=True, help=\"url to bind auth socket to\")\n ap.add_argument(\"--cmd\", required=True, help=\"script to call for 
authentication\")\n args = ap.parse_args()\n cmd = shlex.split(args.cmd)\n lmq = pylokimq.LokiMQ()\n lmq.listen_plain(args.bind)\n lmq.add_anonymous_category(\"llarp\")\n lmq.add_request_command(\"llarp\", \"auth\", lambda x : handle_auth(x, cmd))\n lmq.start()\n print(\"server started\")\n while True:\n time.sleep(1)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"examples/exit_auth.py","file_name":"exit_auth.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"127592075","text":"students = [\n {'name':'张三','age':23,'score':88,'tel':'23423532','gender':'男'},\n {'name':'李四','age':26,'score':80,'tel':'12533453','gender':'女'},\n {'name':'王五','age':15,'score':58,'tel':'56453453','gender':'男'},\n {'name':'赵六','age':16,'score':57,'tel':'86786785','gender':'保密'},\n {'name':'小明','age':18,'score':98,'tel':'23434656','gender':'女'},\n {'name':'小红','age':23,'score':72,'tel':'67867868','gender':'女'},\n]\n# 1)统计不及格学生的个数\ncount=0\nfor i in students:\n if i[\"score\"]<60:\n count=count+1\nprint(\"不及格人数为:\",count)\n# 2)统计未成年学生的个数\nagecount=0\nfor j in students:\n if j[\"age\"]<18:\n agecount=agecount+1\nprint(\"未成年人数为:\",agecount)\n# 3)打印手机尾号是8的学生的名字\ntelname=[]\nfor n in students:\n a=str(n[\"tel\"])\n b=a[-1]\n b=int(b)\n if b==8:\n telname.append(n[\"name\"])\nprint(\"手机尾号为8的同学是:\",telname)\n# 4)打印最高分和对应的学生的名字\nmaxscore=0\nsconame=\"\"\nfor w in students:\n if w[\"score\"]>maxscore:\n maxscore=w[\"score\"]\n sconame=w[\"name\"]\nprint(\"分数最高的是:\",sconame,maxscore)\n\n# 5)将列表按学生成绩从大到小排序\nmax_score = students[0].get('score')\nnum = 0\nfor i in range(0,len(students)):\n for j in range(i, len(students)):\n if students[j].get('score') > max_score:\n max_score = students[j].get('score')\n num = j\n students[i], students[num] = students[num], students[i]\n max_score = 0\nprint(students)\n\n# 6)删除性别保密的所有学生\nfor index,h in enumerate(students):\n\n if h[\"gender\"]==\"保密\":\n del students[index]\n print(students)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"day06/作业3,学生信息.py","file_name":"作业3,学生信息.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"3976037","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 23 14:43:51 2018\r\nTest function on writer object\r\n@author: Shaolun Du\r\n@Contact: Shaolun.du@gmail.com\r\n\"\"\"\r\nimport IO.Writer as Writer\r\n\r\nf_name = \"Output.xlsx\"\r\ns_name = \"Answer\"\r\ntest_str = \"This is a test message---> Hello World!\"\r\nwriter = Writer.Writer( f_name )\r\nwriter.add_sheet(s_name)\r\nwriter.write_ticker( s_name,5,5,test_str)\r\nwriter.close()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"IO/IO_test/test_writer.py","file_name":"test_writer.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"590864082","text":"\"\"\"\n==============================\n 单线程爬虫脚本\n 从指定网址爬取评论并存入数据库\n==============================\n\"\"\"\nprint(__doc__)\n\nimport re\nimport time\nfrom comments.mySpider import MySpider\nfrom database.app_comments_dbHandler import AppCommentsDbHandler\nfrom database.apple_app_dbHandler import AppleAppDbHandler\n\nclass WebToDb(object):\n\n def __init__(self):\n self.__headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/57.0'}\n self.__pattern = re.compile('.*?(.*?).*?(.*?).*?'\n '(.*?).*?(.*?).*?(.*?).*?'\n '(.*?).*?(.*?).*?'\n 
'(.*?).*?(.*?)', re.M | re.S)\n self.__appCommentsHandler = AppCommentsDbHandler()\n self.__appleAppHandler = AppleAppDbHandler()\n\n def executeAll(self):\n counter = 0\n appleApps = self.__appleAppHandler.queryAll()\n for appleApp in appleApps:\n print('\\n正在获取苹果应用: %s-%s 的最新评论······' % (appleApp[0],appleApp[1]))\n added = self.executeByAppId(appleApp[0])\n print('新增%d���评论' % added)\n counter += added\n return counter\n\n def executeByAppId(self,appId):\n count_before = self.__appCommentsHandler.count()\n # get comment entries from page 1 to 10\n for currPage in range(1, 11):\n url = \"https://itunes.apple.com/rss/customerreviews/page=\" + str(currPage) + \\\n \"/id=\" + str(appId) + \"/sortby=mostrecent/xml?l=en&&cc=cn\"\n spider = MySpider(url, self.__headers, self.__pattern)\n comments = spider.getMsgs()\n try:\n # insert comment entries from current website page one by one\n for comment in comments:\n commentItemList = list(comment)\n commentItemList.append(appId) # app_id\n commentItemList.append(str('')) # isSpam\n try:\n self.__appCommentsHandler.insertAppComment(commentItemList)\n except UserWarning:\n raise UserWarning('Outdated comments!')\n except Exception as errStr:\n print(errStr)\n except (Exception,UserWarning) as errStr:\n print(errStr,'Update next app\\'s comment!')\n break\n count_after = self.__appCommentsHandler.count()\n return count_after - count_before\n\nif __name__ == '__main__':\n try:\n fr = time.time()\n # print(\"更新%d条评论\" % WebToDb().executeByAppId('1010704842'))\n print(\"本次更新总共新增%d条评论\" % WebToDb().executeAll())\n to = time.time()\n print('运行时间:' + str(to - fr) + '秒')\n except Exception as e:\n print(e)\n","sub_path":"src/comments/webToDb.py","file_name":"webToDb.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"286603080","text":"from django.urls import path, include\nfrom rest_framework import routers\n\nfrom . 
import api_views\n\napi_router = routers.DefaultRouter()\n\napi_router.register(\n r'tracked_words',\n api_views.TrackedWordsViewSet,\n basename='tracked-words')\n\nurlpatterns = [\n path(r'', include(api_router.urls)),\n]\n","sub_path":"manabi/apps/word_tracking/api_urls.py","file_name":"api_urls.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"418266742","text":"# Code Adapted from boutique ado mini project\n\nfrom django.shortcuts import get_object_or_404\nfrom products.models import Product\n\n\ndef basket_contents(request):\n '''Context processor''' # makes the context dictionary available across the entire application\n\n basket_items = []\n total = 0\n product_count = 0\n basket = request.session.get('basket', {})\n\n for item_id, quantity in basket.items():\n product = get_object_or_404(Product, pk=item_id)\n total += product.price\n product_count += quantity\n basket_items.append({\n 'item_id': item_id,\n 'product': product,\n })\n\n delivery = 5\n\n grand_total = delivery + total\n\n context = {\n 'basket_items': basket_items,\n 'total': total,\n 'product_count': product_count,\n 'delivery': delivery,\n 'grand_total': grand_total,\n }\n\n return context\n","sub_path":"basket/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605495797","text":"# -*- coding: UTF-8 -*-\n#/*--------------------------------------------------------------------\n# 程序名称: D:/Prog/eclipse-workspace/flashTest/test1.py\n# 执行环境: python3.x\n# 程序描述: 获取基金重仓持股信息\n# 输入参数: \n# \n# 输出参数: 无\n# 涉及实体: \n# 产生实体: \n# 编写人员:3468\n# 创建日期:2020年12月18日\n# 修改日期:\n# 修改内容:\n# 代码版本:\n# 公司名称:lwp \n#----------------------------------------------------------------------*/\n\n#初始化\nfrom flask import Flask,render_template,url_for,escape,request,redirect,jsonify,json\nfrom flask.helpers import url_for\n# app = Flask(__name__)\napp = Flask('test1')\n\n#路由和视图函数\n# @app.route('/')\n# def index():\n# return '
Hello World!
'\n@app.route(\"/\",methods=[\"GET\"])\ndef index():\n return render_template(\"index.html\")\n\n# #路由和视图函数\n# @app.route('/ht')\n# def htmltest():\n# return render_template(\"htmltest.html\")\n\n#路由和视图函数\n@app.route('/ht')\ndef htmltest():\n return render_template(\"htmltest.html\",str=\"第一个html模板+参数传递\")\n\n\n##路由后面如果带\"/\",相当于目录,在url访问的时候,也需要带\"/\"结尾\n##如上面的ht,若试图访问 http://127.0.0.1:5000/ht/ 则会报 Not Found\n\n@app.route('/projects/')\ndef projects():\n return 'The project page'\n\n@app.route('/about')\ndef about():\n return 'The about page'\n\n@app.route('/user/')\ndef profile(username):\n return '{}\\'s profile'.format(escape(username))\n \n##指定请求方法\n@app.route('/login',methods=(\"GET\",\"POST\"))\ndef login():\n ##判断请求方法\n if request.method == \"GET\":\n return render_template(\"login.html\")\n if request.method == \"POST\":\n print(\"header:\"+request.headers)\n print(\"json:\"+request.json)\n print(\"data:\"+request.data)\n \n user_info = request.to_dict()\n \n if user_info.get(\"username\") == \"lwp\" and user_info.get(\"pwd\") == \"123456\":\n print(\"username:\"+user_info.get(\"username\"))\n print(\"pwd:\"+user_info.get(\"pwd\"))\n return redirect(\"/\")\n \n# with app.test_request_context():\n# print(url_for('index'))\n# print(url_for('login'))\n# print(url_for('profile',username='lwp'))\n\n@app.route('/echarts')\ndef echarts():\n xstr=[\"衬衫\",\"羊毛衫\",\"雪纺衫\",\"裤子\",\"高跟鞋\",\"袜子\"]\n print(xstr)\n return render_template(\"echarts.html\",xstr=\"第一个html模板+参数传递\")\n\n\n\n@app.route(\"/weather\", methods=[\"GET\"])\ndef weather():\n if request.method == \"GET\":\n ##res = query_db(\"SELECT * FROM weather\")\n \n# return jsonify(month=[x[0] for x in res],\n# evaporation=[x[1] for x in res],\n# precipitation=[x[2] for x in res])\n #\"1月\",\"二月\",\"三月\",\"四月\",\"五月\",\"六月\",\"七月\",\"八月\",\"九月\",\"十月\",\"十一月\",\"十二月\"\n list_month = [\"1月\",\"二月\",\"三月\",\"四月\",\"五月\",\"六月\",\"七月\",\"八月\",\"九月\",\"十月\",\"十一月\",\"十二月\"]\n list_evaporation = [2,4.9,7,23.2,25.6,76.7,135.6,162.2,32.6,20,6.4,3.3]\n list_precipitation = [2.6,5.9,9,26.4,28.7,70.7,175.6,182.2,48.7,18.8,6,2.3]\n \n return jsonify(month=[\"1月\",\"二月\",\"三月\",\"四月\",\"五月\",\"六月\",\"七月\",\"八月\",\"九月\",\"十月\",\"十一月\",\"十二月\"],\n evaporation=[2,4.9,7,23.2,25.6,76.7,135.6,162.2,32.6,20,6.4,3.3],\n precipitation=[2.6,5.9,9,26.4,28.7,70.7,175.6,182.2,48.7,18.8,6,2.3])\n \n\n##json ajax\n@app.route('/getdata')\ndef get_data():\n language = ['python', 'java', 'c', 'c++', 'c#', 'php']\n value = ['100', '150', '100', '90', '80', '90']\n return json.dumps({'language':language,'value':value},ensure_ascii=False) #如果有中文的话,就需要ensure_ascii=False\n\n#启动服务器,默认ip端口为127.0.0.1:5000\nif __name__ == '__main__':\n app.config['JSON_AS_ASCII'] = False ##jsonify中文显示异常,需要加入\n app.run(debug=True)\n \n ","sub_path":"flaskTest/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"640305271","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport unclebob\n\nfrom os.path import dirname, abspath, join\nLOCAL_FILE = lambda *path: join(abspath(dirname(__file__)), *path)\nsys.path.append(LOCAL_FILE('apps'))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '#(p^g%#^=7!vvjxy7(sx20%8w*f@r)64ofu1isux46zdxd2!a&'\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n (u'Gabriel Falcão', 'gabriel@lettuce.it'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 
'django.db.backends.sqlite3',\n 'NAME': LOCAL_FILE('uncle.bob'),\n }\n}\n\nTIME_ZONE = 'America/Chicago'\nLANGUAGE_CODE = 'en-us'\nSITE_ID = 1\nUSE_I18N = True\nUSE_L10N = True\nMEDIA_ROOT = ''\nMEDIA_URL = ''\nSTATIC_ROOT = ''\nSTATIC_URL = '/static/'\nADMIN_MEDIA_PREFIX = '/static/admin/'\nSTATICFILES_DIRS = (\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'foo',\n 'bar',\n)\nTEST_RUNNER = 'unclebob.runners.Nose'\n\nBOURBON_LOADED_TIMES = 0\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536685780","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom .models import *\nimport jwt, datetime\nimport random\nimport time\nfrom django.http import JsonResponse\nfrom twilio.rest import Client\nfrom django.core.mail import EmailMultiAlternatives\nfrom client.views import verify_token\n\n\n# Function for sending OTP via email\ndef send_email(otp, email_1):\n \n email = EmailMultiAlternatives('2FA OTP for FUNGATAUA trust', ' Your OTP is :' + otp)\n email.to = [email_1]\n email.send()\n\n \n\n\n# Function for sending sms\ndef send_sms(otp):\n\n # Your Account SID from twilio.com/console\n account_sid = \"ACb92105d6cb505863a13e05bef39dc8bd\"\n # Your Auth Token from twilio.com/console\n auth_token = \"44705a3ce65f65f5c7bffc47e398311e\"\n\n client = Client(account_sid, auth_token)\n \n message = client.messages.create(\n to=\"+917048475675\", \n from_=\"+12512903658\",\n body=\"Your otp is \" + str(otp) + \" only valid for 05 mins \")\n\n\n# API for login \nclass LoginView(APIView):\n\n def post(self, request):\n email = request.data['email']\n password = request.data['password']\n user = User.objects.filter(email=email).first()\n\n if user is None:\n context = {\n \"success\":False,\n \"error\":\"User not found\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email,\n \"is_active\":False\n }\n }\n return JsonResponse(context)\n\n if not user.check_password(password):\n context = {\n \"success\":False,\n \"error\":\"In-correct password\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email,\n \"is_active\":False\n }\n }\n return JsonResponse(context)\n \n otp = random.randint(1000, 9999) \n print(\"otp :\", otp)\n \n try:\n send_email(str(otp), email)\n except:\n context = {\n \"success\":False,\n \"error\":\"Unable to send otp to given E-Mail\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email,\n \"is_active\":False\n }\n }\n return JsonResponse(context)\n\n expire_at = time.time() + 300\n\n if user.is_enabled==True:\n \n new_login = loginDetails()\n\n try:\n new_login.email = email\n 
new_login.otp = otp\n new_login.exp = expire_at\n new_login.is_active = True\n new_login.save()\n except:\n new_login = loginDetails.objects.get(email=email)\n new_login.otp = otp\n new_login.exp = expire_at\n new_login.is_active = True\n new_login.save()\n\n context = {\n \"success\":True,\n \"error\":\"\",\n \"message\":\"OTP sent successully\",\n \"data\":\n {\n \"email\":email,\n \"is_active\":True\n }\n }\n return JsonResponse(context)\n\n else:\n context = {\n \"success\":False,\n \"error\":\"user has disabled your account Please contact administrator\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email,\n \"is_active\":False\n }\n }\n return JsonResponse(context)\n\n\n#API for checkiing otp and verifying it\nclass OtpVerify(APIView):\n\n def post(self, request):\n\n email = request.data['email']\n otp = request.data['otp']\n current_req = loginDetails.objects.get(email=email)\n\n if current_req.is_active==True:\n if time.time() > float(current_req.exp):\n \n context = {\n \"success\":False,\n \"error\":\"OTP was expired!\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email,\n }\n }\n return Response(context)\n\n elif int(current_req.otp)==int(otp):\n \n user = User.objects.get(email=email)\n current_req.is_active = False\n current_req.save()\n\n payload = {\n 'id': user.id,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=500),\n 'iat': datetime.datetime.utcnow()\n }\n\n token = jwt.encode(payload, 'secret', algorithm='HS256').decode('utf-8') #generating token\n response = Response()\n response.set_cookie(key='token', value=token, httponly=True)\n \n response.data = {\n \"success\":True,\n \"error\":\"\",\n \"message\":\"User login successfully\",\n \"token\":token,\n \"data\":\n {\n \"id\":user.id,\n \"email\":user.email,\n \"phone_number\":user.phone_number,\n \"is_superadmin\": user.is_superadmin,\n \"client_add\": user.client_add,\n \"client_edit\":user.client_edit ,\n \"services_add\":user.services_add,\n \"services_edit\":user.services_edit,\n \"category_add\" : user.category_add,\n \"category_edit\": user.category_edit,\n \"status_add\": user.status_add,\n \"status_edit\": user.status_edit,\n \"is_enabled\": user.is_enabled,\n }\n }\n return response\n\n else:\n context = {\n \"success\":False,\n \"error\":\"OTP was wrong\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email\n }\n }\n return Response(context)\n\n else:\n context = {\n \"success\":False,\n \"error\":\"no data\",\n \"message\":\"\",\n \"data\":\"\"\n }\n return Response(context) \n\n\n# API for resending otp to user after expiring. 
\n@api_view([\"POST\"])\ndef resend_otp(request):\n\n email = request.data['email']\n current_req = loginDetails.objects.get(email=email)\n\n if current_req.is_active:\n # checks if OTP expired or not\n if time.time()> float(current_req.exp):\n otp = random.randint(1000,9999)\n print(\"otp\", otp)\n # send_sms(otp)\n\n try:\n send_email(str(otp), email)\n except:\n context = {\n \"success\":False,\n \"error\":\"Unable to send otp to given E-Mail\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email,\n \"is_active\":False\n }\n }\n return JsonResponse(context)\n\n expire_at = time.time() + 300\n current_req.otp = otp\n current_req.exp = expire_at\n current_req.save()\n \n context = {\n \"success\":True,\n \"error\":\"\",\n \"message\":\"OTP resend successfully\",\n \"data\":\n {\n \"email\":email\n }\n }\n return Response(context)\n\n else:\n context = {\n \"success\":False,\n \"error\":\"OTP already exist\",\n \"message\":\"\",\n \"data\":\n {\n \"email\":email\n }\n }\n return Response(context)\n\n else:\n context = {\n \"success\":False,\n \"error\":\"no data\",\n \"message\":\"\",\n \"data\":\"\"\n }\n return Response(context)\n\n\nclass ResetPassword(APIView):\n def put(self, request):\n payload = verify_token(request)\n \n try:\n user = User.objects.filter(id=payload['id']).first()\n except:\n return payload\n \n old_password = request.data['old_password']\n new_password = request.data['new_password']\n conf_password = request.data['confirm_password']\n if not user.check_password(old_password):\n context = {\n \"success\":False,\n \"error\":\"Old password does not match\",\n \"message\":\"\",\n \"data\":\"\"\n }\n return Response(context)\n \n if new_password==conf_password:\n user.set_password(new_password)\n user.save()\n context = {\n \"success\":True,\n \"error\":\"\",\n \"message\":\"Password Reset Successfully\",\n \"data\":\"\"\n }\n return Response(context)\n else:\n context = {\n \"success\":False,\n \"error\":\"Password does not match\",\n \"message\":\"\",\n \"data\":\"\"\n }\n return Response(context)\n\n\n# API for logout\nclass LogoutView(APIView):\n\n def get(self, request):\n \n response = Response()\n response.delete_cookie('token') #delete the token\n \n response.data = {\n \"error\":\"\",\n 'message': \"success\"\n }\n return response\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301611952","text":"try:\n import RPi.GPIO as GPIO\nexcept ImportError:\n import OPi.GPIO as GPIO\n GPIO.setboard(GPIO.ZERO)\n\nfrom time import sleep\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\n\ncontrol_pins = (31, 33, 35, 37)\n\nir_pin = 29\nGPIO.setup(ir_pin, GPIO.OUT)\n\nforward_pin = 38\nbackward_pin = 36\nGPIO.setup(forward_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(backward_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\nbutton_pin = 32\nGPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n\nseq = [\n [1, 0, 0, 1],\n [1, 1, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 1, 1],\n]\n\n\ndef callback(pin):\n global step\n step = 0\n if GPIO.input(forward_pin):\n step = 1\n if GPIO.input(backward_pin):\n step = -1\n print(abs_position)\n\n\ndef button_callback(channel):\n global run\n if run:\n return\n run = True\n steps = 512\n\n rseq = list(reversed(seq))\n try:\n for i in range(steps):\n for halfstep in range(len(rseq)):\n for pin in range(4):\n GPIO.output(control_pins[pin], rseq[halfstep][pin])\n sleep(0.003)\n 
except KeyboardInterrupt:\n pass\n except Exception as err:\n print(err)\n\n for pin in control_pins:\n GPIO.output(pin, 0)\n\n run = False\n\n\nfor pin in control_pins:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, 0)\n\n\nGPIO.add_event_detect(forward_pin, GPIO.BOTH, callback=callback)\nGPIO.add_event_detect(backward_pin, GPIO.BOTH, callback=callback)\n\nGPIO.add_event_detect(button_pin, GPIO.RISING, callback=button_callback)\n\nstep = 0\nabs_position = 0\nposition = 0\nrun = False\n\nprint(abs_position)\n\ntry:\n while True:\n if not step:\n run = False\n sleep(0.05)\n else:\n run = True\n for pin in range(4):\n GPIO.output(control_pins[pin], seq[position][pin])\n sleep(0.003)\n position += step\n abs_position += step\n if position < 0:\n position = 3\n if position > 3:\n position = 0\nexcept KeyboardInterrupt:\n pass\nexcept Exception as err:\n print(err)\n\n\nfor pin in control_pins:\n GPIO.output(pin, 0)\n","sub_path":"examples/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"551393685","text":"import cv2\n\ndef diff_img(prev, curr, nxt):\n i = cv2.absdiff(curr, prev)\n j = cv2.absdiff(nxt, curr)\n return cv2.bitwise_and(j, i)\n\ncam = cv2.VideoCapture(0)\n\nw = \"Example 3\"\ncv2.namedWindow(w, cv2.CV_WINDOW_AUTOSIZE)\n\nprev_img = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\ncurr_img = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\nnext_img = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\n\nwhile True:\n cv2.imshow(w, diff_img(prev_img, curr_img, next_img))\n\n prev_img = curr_img\n curr_img = next_img\n next_img = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)\n\n # If you don't have this, the window\n # won't show.\n key = cv2.waitKey(10)\n if key == 27:\n cv2.destroyWindow(w)\n break\n","sub_path":"examples/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"351864737","text":"import json\n\nlocalDict = {}\n\nwith open('CET4.txt', 'r', encoding='utf-8') as f1:\n lst1 = f1.readlines()\n for i in lst1:\n print(i.split())\n word = i.split()[0]\n trans = i.split()[1]\n localDict[word] = trans\nwith open('CET6.txt', 'r', encoding='utf-8') as f2:\n lst2 = f2.readlines()\n for i in lst2:\n word = i.split()[0]\n trans = i.split()[1]\n localDict[word] = trans\n\ndictfile = open('LocalDict.json', 'w', encoding='utf-8')\njson.dump(localDict, dictfile, ensure_ascii=False)\ndictfile.close()\n","sub_path":"Lexicons/make_localdict.py","file_name":"make_localdict.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347099056","text":"#! /usr/bin/env python\n\nimport logging\nlogger = logging.getLogger(\"robot\")\nlogger.setLevel(logging.DEBUG)\n\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)-15s %(name)s: %(levelname)s - %(message)s')\nconsole.setFormatter(formatter)\n\nlogger.addHandler(console)\n\n\nimport sys\nimport time\nimport Queue as queue\n\nimport pyoro\nimport robots\nfrom robots.behaviours import desires, interrogation\nfrom robots.helpers.cb import nop\n\nhuman = \"HERAKLES_HUMAN1\"\nhuman_mood = \"NORMAL\"\n\nincoming_desires = queue.Queue()\nincoming_human_experiences = queue.Queue()\n\ndef on_human_experience(e):\n logger.info(\"Something happened to the human!! 
Details:\" + str(e))\n for d in e:\n incoming_human_experiences.put(d)\n\ndef onemotion(e):\n logger.warning(\"New emotional state:\" + str(e))\n\nwith robots.PR2(knowledge = pyoro.Oro(), init = False) as pr2:\n\n desires_performer = desires.DesiresPerformer(pr2)\n\n # Callback for desires\n def ondesires(e):\n logger.info(\"Incomig desires:\" + str(e))\n for sit in e:\n try:\n desire = desires.desire_factory(sit, pr2)\n\n # Has the new desire an higher priority? if yes, interrupt current one.\n desires_performer.trysuperseed(desire)\n incoming_desires.put(desire)\n except desires.NotExistingDesireTypeError as e:\n logger.warning(str(e))\n logger.warning(\"Skipping this desire.\")\n\n def onverbalization(e):\n for t in e:\n text = pr2.knowledge[\"%s verbalisesTo *\" % t][0]\n logger.warning(\"New verbalization from Dialogs: <%s>\" % text)\n pr2.say(text)\n\n\n if \"--init\" in sys.argv:\n logger.info(\"Initializing the robot...\")\n pr2.init(p3d = \"/u/magharbi/openrobots/share/move3d/assets/ADREAM/ADREAM-assets.p3d\")\n\n # Subscribe to new human orders\n pr2.knowledge.subscribe([human + \" desires ?d\"], ondesires)\n\n # subscribe to changes of emotional state\n pr2.knowledge.subscribe([\"myself experiences ?s\"], onemotion)\n\n # subscribe to changes of emotional state\n pr2.knowledge.subscribe([human + \" experiences ?s\"], on_human_experience)\n\n pr2.knowledge.subscribe([\"?sit verbalisesTo ?s\"], onverbalization, var = \"?sit\")\n\n try:\n logger.info(\"Waiting for events...\")\n while True:\n try:\n human_evt = incoming_human_experiences.get(False)\n\n if human_evt:\n evt_type = pr2.knowledge.getDirectClassesOf(human_evt).keys()\n logger.debug(\"Type of human event: \" + str(evt_type))\n if \"InterrogativeState\" in evt_type:\n logger.info(\"The interactor is asking a question. Let's handle it.\")\n question = interrogation.question_factory(human_evt, pr2)\n question.perform()\n\n if \"Fall\" in evt_type:\n logger.info(\"The human falled down! 
Carambar!\")\n places = pr2.knowledge[human + \" isAt *\"]\n if not places:\n logger.error(\"I've no clue where the human is!\")\n else:\n logger.info(\"I think the human is in \" + str(places))\n logger.info(\"I go to \" + places[0])\n pr2.manipose(nop)\n pr2.look_at([1.0,0.0,1.0,\"base_link\"])\n pr2.goto(places[0])\n pr2.look_at([1.0,0.0,0.5,\"base_link\"])\n pr2.setpose(\"FALL\")\n pr2.release_gripper()\n pr2.wait(1.5)\n pr2.close_gripper()\n pr2.manipose(nop)\n pr2.translate(-0.2)\n except queue.Empty:\n pass\n\n try:\n desire = incoming_desires.get(False)\n desires_performer.perform(desire)\n except queue.Empty:\n pass\n\n time.sleep(0.1)\n except KeyboardInterrupt:\n pass\n\n","sub_path":"scripts/handover.py","file_name":"handover.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74256272","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom mu2e import mu2e_ext_path\n# from emtracks.particle import trajectory_solver\nfrom emtracks.plotting import config_plots\nconfig_plots()\nplt.rcParams['figure.figsize'] = [13, 11] # larger figures\nplt.rcParams.update({'font.size': 18.0}) # increase plot font size\n\nplot_dir = '/home/ckampa/data/plots/html/deltaB/coilshift/'\n\n# load dfs\n# full DS\ndf_full = pd.read_pickle(mu2e_ext_path+'Bmaps/DSMap_coilshift.p')\ndf_nom = pd.read_pickle(mu2e_ext_path+'Bmaps/Mu2e_DSMap_V13.p')\n# calculate residuals\ndf_full.eval('B = (Bx**2 + By**2 + Bz**2)**(1/2)', inplace=True)\ndf_nom.eval('B = (Bx**2 + By**2 + Bz**2)**(1/2)', inplace=True)\ndf_full.loc[:, 'B_res'] = df_nom['B'] - df_full['B']\ndf_full.loc[:, 'Bx_res'] = df_nom['Bx'] - df_full['Bx']\ndf_full.loc[:, 'By_res'] = df_nom['By'] - df_full['By']\ndf_full.loc[:, 'Bz_res'] = df_nom['Bz'] - df_full['Bz']\n\ndf_full_fms = df_full.query('Z >= 4.25 & Z <= 14. 
& R <= 0.8').copy()\n# tracker fit\n# new: query full df\ndf_fit = df_full.query('R <= 0.8 & Z >= 8.41 & Z <= 11.66')\n# tracks, tracker\n# df_run = pd.read_pickle(f'/home/ckampa/data/pickles/emtracks/mid_hp_bias_up/B_Residuals_Mau13_Fit_hp_{num}.pkl')\n# tracks, tracker, straws\n# df_run_straws = pd.read_pickle(f'/home/ckampa/data/pickles/emtracks/mid_hp_bias_up/B_Residuals_Mau13_Fit_hp_{num}_Straws.pkl')\n\ndef make_plot(df, file_suffix='_tracker', title_suffix='Tracker Region', log=False):\n label_temp = r'$\\mu = {0:.3E}$'+ '\\n' + 'std' + r'$= {1:.3E}$' + '\\n' + 'Integral: {2}'\n if log:\n log_str = '_log'\n yscale = 'log'\n else:\n log_str = ''\n yscale = 'linear'\n print(\"Generating plots:\"+file_suffix)\n # simple histograms\n N_bins = 200\n lsize = 16\n xmin = df[['Bx_res','By_res','Bz_res','B_res']].min().min()\n xmax = df[['Bx_res','By_res','Bz_res','B_res']].max().max()+1e-5\n bins = np.linspace(xmin, xmax, N_bins+1)\n fig, axs = plt.subplots(2, 2)\n axs[0, 0].hist(df['Bx_res'], bins=bins, label=label_temp.format(df['Bx_res'].mean(), df['Bx_res'].std(), len(df)))\n axs[0, 0].set(xlabel=r\"$\\Delta B_x$\"+\" [Gauss]\", ylabel=\"Count\", yscale=yscale)\n axs[0, 0].legend(prop={'size': lsize})\n axs[0, 1].hist(df['By_res'], bins=bins, label=label_temp.format(df['By_res'].mean(), df['By_res'].std(), len(df)))\n axs[0, 1].set(xlabel=r\"$\\Delta B_y$\"+\" [Gauss]\", ylabel=\"Count\", yscale=yscale)\n axs[0, 1].legend(prop={'size': lsize})\n axs[1, 0].hist(df['Bz_res'], bins=bins, label=label_temp.format(df['Bz_res'].mean(), df['Bz_res'].std(), len(df)))\n axs[1, 0].set(xlabel=r\"$\\Delta B_z$\"+\" [Gauss]\", ylabel=\"Count\", yscale=yscale)\n axs[1, 0].legend(prop={'size': lsize})\n axs[1, 1].hist(df['B_res'], bins=bins, label=label_temp.format(df['B_res'].mean(), df['B_res'].std(), len(df)))\n axs[1, 1].set(xlabel=r\"$\\Delta |B|$\"+\" [Gauss]\", ylabel=\"Count\", yscale=yscale)\n axs[1, 1].legend(prop={'size': lsize})\n # title_main=f'Mau 13 Subtraction with Mau 10 PS+TS: Field Difference from Mau 13\\n{DS_frac:0.3f}xDS, {TS_frac:0.3f}x(PS+TS)\\n'+r\"$(\\Delta B = B_{\\mathregular{Mau10\\ comb.}} - B_{\\mathregular{Mau13}})$\"\n # title_main=f'Hall Probe {num} Biased: Model Fit Residuals ('+ r'$\\Delta B = B_\\mathrm{data} - B_\\mathrm{fit}$'+'):\\n'\n title_main = 'Mau13 - GA Coilshift Proposal\\n'\n fig.suptitle(f'{title_main}{title_suffix}')\n fig.tight_layout(rect=[0,0,1,0.9])\n # plot_file = plot_dir+f'Mau13_{DS_frac:0.3f}xDS_{TS_frac:0.3f}xPS-TS_Comparison_Hists'+file_suffix\n # plot_file = plot_dir+f'Mau13_fit_residuals_hp_{num}'+file_suffix\n plot_file = plot_dir+f'Mau13-Coilshift_residuals'+file_suffix+log_str\n\n fig.savefig(plot_file+'.pdf')\n fig.savefig(plot_file+'.png')\n print(\"Generating plots complete.\\n\")\n\n return fig, axs\n\n# run plotting function\nmake_plot(df_full, '_full', 'Grid in Entire DSMap File')\nmake_plot(df_full_fms, '_fms', 'Grid in FMS Mapped Region of DS (4.25 <= Z <= 14 m) (R <= 0.8 m)')\nmake_plot(df_fit, '_tracker', 'Grid in Tracker Region')\nmake_plot(df_full, '_full', 'Grid in Entire DSMap File', True)\nmake_plot(df_full_fms, '_fms', 'Grid in FMS Mapped Region of DS (4.25 <= Z <= 14 m) (R <= 0.8 m)', True)\nmake_plot(df_fit, '_tracker', 'Grid in Tracker Region', True)\n# make_plot(df_run, '_tracker_tracks', 'Signal e- Tracks in Tracker Region')\n# make_plot(df_run_straws, '_tracker_tracks_straws', 'Signal e- Tracks in Tracker Region (40 cm <= R <= 70 
cm)')\n","sub_path":"scripts/FieldFitting/coilshift_deltaB_plots.py","file_name":"coilshift_deltaB_plots.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"152054473","text":"import os\nimport sys\n\n\ndef save_coordinates(path):\n ''' save all the coordinates in a set '''\n visited = set()\n x = 0\n y = 0\n for move in path:\n direction = move[0]\n distance = int(move[1:])\n # add the coordinates along a direction\n for _ in range(distance):\n if direction == 'R':\n x += 1\n elif direction == 'L':\n x -= 1\n elif direction == 'U':\n y += 1\n else:\n y -= 1\n visited.add((x, y))\n return visited\n\n\nfilepath = f'{os.getcwd()}/Q3/input.txt'\nwith open(filepath, 'r') as f:\n path_1 = f.readline().split(',')\n path_2 = f.readline().split(',')\n\ncoordinates_1 = save_coordinates(path_1)\ncoordinates_2 = save_coordinates(path_2)\n\n# find matching coordinates for the paths\nintersection = coordinates_1.intersection(coordinates_2)\n\n# find closest point to origin\nmin_distance = sys.maxsize\nfor pos in intersection:\n distance = abs(pos[0]) + abs(pos[1])\n min_distance = min(distance, min_distance)\nprint(min_distance)\n","sub_path":"Q3/Q3P1.py","file_name":"Q3P1.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125673640","text":"import os\n\nfrom rdflib import Namespace, Graph\nfrom rdflib.namespace import OWL, FOAF, RDF, RDFS\n\nfrom constants import SCHEMA_DIR\n\n\nSTUDYBOT = Namespace('http://www.example.org/')\nSTUDY = Namespace(os.path.join('file:///', SCHEMA_DIR, 'study.ttl'))\nTEACH = Namespace('http://linkedscience.org/teach/ns#')\nAIISO = Namespace('http://purl.org/vocab/aiiso/schema#')\nICAL = Namespace('http://www.w3.org/2002/12/cal#')\nVIVO = Namespace('http://vivoweb.org/ontology/core')\n\ngraph = Graph()\ngraph.base = STUDYBOT\ngraph.bind('study', STUDY)\ngraph.bind('teach', TEACH)\ngraph.bind('aiiso', AIISO)\ngraph.bind('vivo', VIVO)\ngraph.bind('ical', ICAL)\ngraph.bind('foaf', FOAF)\ngraph.bind('rdfs', RDFS)\ngraph.bind('rdf', RDF)\ngraph.bind('owl', OWL)\n","sub_path":"src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627198856","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : decorator的学习.py\n# @Author: 胡克林\n# @Date : 2019/9/13\n# @Desc : Hello world!\ndef verification(*args):\n def login(User, Pwd, **others):\n nStatus = 0\n if args[0]:\n print('开启用户和密码验证:')\n if User == 'test' and Pwd == '123456':\n print('登录成功!')\n nStatus = 1\n else:\n print('登录失败!')\n nStatus = 0\n else:\n print('不开启验证,直接登录!')\n nStatus = 1\n print('用户名:', User, ',密码:', Pwd)\n print('登录过程结束……')\n return nStatus\n return login # 函数作为返回值\n\n\ndef t_login(User, Pwd, isTrue, **others):\n f = verification(isTrue)\n nStatus = f(User, Pwd)\n print('登录状态:', nStatus)\n\n\nt_login('test', '123456', False)\n","sub_path":"decorator的学习.py","file_name":"decorator的学习.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"320029811","text":"import pymongo\r\nfrom datetime import datetime\r\nfrom getpass import getpass\r\nfrom pprint import pprint\r\nfrom neo4j import GraphDatabase\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport re\r\n\r\nclass 
GraphConnect:\r\n\r\n def __init__(self):\r\n #self.driver=GraphDatabase.driver(\"bolt://localhost\",auth=(\"neo4j\",\"root\"))\r\n self.driver=GraphDatabase.driver(\"bolt://172.16.0.160:7687\",auth=(\"neo4j\",\"LSMDB\"))\r\n \r\n def delete(self,type,name,optionalName):\r\n deleteString=\"\"\r\n secondaryDelete=\"\"\r\n if(type==\"Nation\"):\r\n deleteString='MATCH(hotel:Hotel{nation:\"'+name+'\"})<-[review:Review]-(reviewer:Reviewer) WITH reviewer,collect(review) as itsRels FOREACH(singleReview in itsRels| DELETE singleReview)' \r\n secondaryDelete='MATCH(hotel:Hotel{nation:\"'+name+'\"}) DELETE hotel'\r\n elif(type==\"City\"):\r\n deleteString='MATCH(hotel:Hotel{city:\"'+name+'\"})<-[review:Review]-(reviewer:Reviewer) WITH reviewer,collect(review) as itsRels FOREACH(singleReview in itsRels| DELETE singleReview)' \r\n secondaryDelete='MATCH(hotel:Hotel{city:\"'+name+'\"}) DELETE hotel'\r\n elif(type==\"Reviewer\"):\r\n deleteString='MATCH(hotel:Hotel)<-[r:Review]-(reviewer:Reviewer{name:\"'+name+'\"}) DELETE reviewer,r'\r\n secondaryDelete='MATCH(reviewer:Reviewer{name:\"'+name+'\"}) DELETE reviewer'\r\n elif(type==\"Hotel\"):\r\n deleteString='MATCH(hotel:Hotel{name:\"'+name+'\"})<-[review:Review]-(reviewer:Reviewer) WITH reviewer,collect(review) as itsRels FOREACH(singleReview in itsRels| DELETE singleReview)' \r\n secondaryDelete='MATCH(hotel:Hotel{name:\"'+name+'\"}) DELETE hotel'\r\n elif(type==\"Review\"):\r\n deleteString='MATCH(hotel:Hotel{name:\"'+name+'\"})<-[review:Review]-(reviewer:Reviewer{name:\"'+optionalName+'\"}) DELETE review'\r\n else:\r\n pass\r\n session=self.driver.session()\r\n session.run(deleteString)\r\n if(secondaryDelete!=\"\"):\r\n session.run(secondaryDelete)\r\n session.close()\r\n \r\n def getFakeReviewer(self,nation):\r\n executionString=\"CALL algo.betweenness.stream('MATCH (reviewer:Reviewer) RETURN id(reviewer) as id','MATCH (a1:Reviewer)-[:Review]->(n:Hotel{nation:\"+'\"'+nation+'\"'+\"})<-[:Review]-(a:Reviewer) RETURN id(a) as source,id(a1) as target',{graph:'cypher',strategy:'degree'}) YIELD nodeId,centrality RETURN algo.getNodeById(nodeId).name,centrality order by centrality desc LIMIT 20\"\r\n session=self.driver.session()\r\n result=session.run(executionString)\r\n session.close()\r\n return result\r\n \r\n def getPopularHotel(self,type,parameters):\r\n startString='MATCH(hotel:Hotel'\r\n parameterString=\"\"\r\n if(type==\"Nation\"):\r\n parameterString='{nation:\"'+parameters[0]+'\"'+'})<-[review:Review]-(reviewer:Reviewer) USING index hotel:Hotel(nation)'\r\n else:\r\n parameterString='{nation:\"'+parameters[0]+'\"'+',city:\"'+parameters[1]+'\"'+'})<-[review:Review]-(reviewer:Reviewer) USING index hotel:Hotel(city)'\r\n endString='WITH distinct hotel.name as nameHotel,count(review) as countHotel RETURN nameHotel,countHotel ORDER BY countHotel desc limit 50'\r\n totalString=(startString+parameterString+endString)\r\n session=self.driver.session()\r\n result=session.run(totalString)\r\n session.close()\r\n return result\r\n \r\n def getPopularReviewer(self,type,parameters):\r\n startString='MATCH(hotel:Hotel'\r\n parameterString=\"\"\r\n if(type==\"Nation\"):\r\n parameterString=parameterString='{nation:\"'+parameters[0]+'\"'+'})<-[review:Review]-(reviewer:Reviewer) USING index hotel:Hotel(nation)'\r\n else:\r\n parameterString='{nation:\"'+parameters[0]+'\"'+',city:\"'+parameters[1]+'\"'+'})<-[review:Review]-(reviewer:Reviewer) USING index hotel:Hotel(city)'\r\n endString='WITH distinct reviewer.name as nameReviewer,count(review) as 
countReviewer RETURN nameReviewer,countReviewer ORDER BY countReviewer desc limit 50'\r\n totalString=(startString+parameterString+endString)\r\n session=self.driver.session()\r\n result=session.run(totalString)\r\n session.close()\r\n return result\r\n \r\n def getReccomendedHotel(self,reviewerName):\r\n startString='MATCH(reviewer:Reviewer)-[firstReview:Review]->(hotel:Hotel)<-[secondReview:Review]-(secondReviewer:Reviewer)-[thirdReview:Review]->(secondHotel:Hotel) WHERE reviewer.name=\"'\r\n parameterString=reviewerName+'\"'\r\n endString='and toFloat(firstReview.vote)>7.0 and toFloat(secondReview.vote)>7.0 and toFloat(thirdReview.vote)>7.0 and reviewer<>secondReviewer and secondHotel<>hotel WITH reviewer,collect(secondHotel)as goodHotel UNWIND goodHotel as searchedHotel RETURN searchedHotel.name LIMIT 10'\r\n totalString=(startString+parameterString+endString)\r\n session=self.driver.session()\r\n result=session.run(totalString)\r\n session.close()\r\n return result\r\n","sub_path":"code/graphConnection.py","file_name":"graphConnection.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"475876046","text":"import random\nfrom telegram.ext import Dispatcher, CommandHandler, ConversationHandler\n# n = random.randint(1,99)\ngames = {}\ndef help():\n return \"\"\" \n 猜一个0-100之间的数字。You guessed a number from 0 - 100.\n/DGuess 查看现在的状态和获取帮助。Check your current status and get help.\n/DGuess number 输入number猜数字,看谁用的次数最少。Enter number and see who uses it the least often. \"\"\"\ndef guessing(update, context):\n chatid = update.message.chat.id\n if not (chatid in games):\n games[chatid] = {'randomnumber': random.randint(1,99), \"members\":{}}\n print(games)\n fname = update.message.from_user.first_name\n global etries\n if len(context.args) == 0 :\n update.message.reply_text(help()) \n return\n else:\n b = context.args[0]\n a = int(b)\n aa = b\n if fname in games[chatid]:\n games[chatid]['members'][fname] += 1\n else:\n games[chatid]['members'][fname] = 1\n msg8 = \"\"\n gn = \"亲爱的 %s,请猜一个0-100之间的数字。\\nGuess a number from 0 - 100.\\n\\n \"%(fname)\n msg8 += gn\n if not aa.isdigit() :\n msg8 += \"想什么呢审题呀 ur bad thats not a number\"\n else:\n if a < 0 or a > 100:\n msg8 += \"\\n0-100 \\nTries of %s\"%(games[chatid]['members'][fname])\n else:\n if a == games[chatid]['randomnumber']:\n if etries == 0 or etries > games[chatid] : \n etries = etries + games[chatid]\n msg8 += \"Ayyy You guessed it! %s guessed it in %s tries. 天呐你竟然猜到了 概率是 '%%1' 呀! 500 XP %s “它“值得拥有。竟然 %s 次就猜到了.\"%(games[chatid]['members'][fname],games[chatid],games[chatid]['members'][fname],games[chatid]) \n games[chatid]['randomnumber'] = random.randint(1,99)\n elif a > games[chatid]['randomnumber'] :\n msg8 += \"大了!重猜!It's too big! Guess again!\"\n elif a < games[chatid]['randomnumber'] :\n msg8 += \"小了!重猜!It's too small! 
Guess again!\"\n msg8 += \"\\nTries: %s\\n\\nAuthorised By Noah <3\\n作者:Noah\"%(games[chatid]['members'])\n update.message.reply_text(msg8)\n\ndef add_handler(dp:Dispatcher):\n guess_handler = CommandHandler('DGuess',guessing)\n dp.add_handler(guess_handler)\n","sub_path":"Bots/Dank_Memer_bot/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"486972305","text":"import turtle\r\nturtle.reset()\r\nturtle.shape(\"classic\")\r\n\r\ndef drow_deshed_line(length:int, color:str):\r\n \"\"\"Рисует штриховую линию из центра length и color цвета\"\"\"\r\n for i in range(length):\r\n turtle.color(color)\r\n turtle.pendown()\r\n turtle.fd(10)\r\n turtle.penup()\r\n turtle.fd(10)\r\n\r\ndrow_deshed_line(15, \"red\")\r\nturtle.mainloop()","sub_path":"abc.py","file_name":"abc.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"383415000","text":"'''\n\n'''\n\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution(object):\n def kthSmallest(self, root, k):\n \"\"\"\n :type root: TreeNode\n :type k: int\n :rtype: int\n \"\"\"\n # stack = [(0, root)]\n # while stack:\n # c, node = stack.pop()\n # if not node:\n # continue\n # if c == 1:\n # k -= 1\n # if k == 0:\n # return node.val\n # else:\n # stack.append((0, node.right))\n # stack.append((1, node))\n # stack.append((0, node.left))\n\n # Runtime: 32 ms, faster than 99.19% of Python online submissions for Kth Smallest Element in a BST.\n # Memory Usage: 20.7 MB, less than 53.08% of Python online submissions for Kth Smallest Element in a BST.\n\n self.k = k\n self.res = None\n\n def dfs(root):\n if not root:\n return\n dfs(root.left)\n self.k -= 1\n if self.k == 0:\n self.res = root.val\n return\n dfs(root.right)\n\n dfs(root)\n return self.res\n# Runtime: 52 ms, faster than 50.67% of Python online submissions for Kth Smallest Element in a BST.\n# Memory Usage: 20.7 MB, less than 37.76% of Python online submissions for Kth Smallest Element in a BST.","sub_path":"algorithms/tree/leetcode-230-KthSmallestElementinaBST.py","file_name":"leetcode-230-KthSmallestElementinaBST.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"565177784","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 24 16:35:21 2016\n\n@author: Administrator\n\"\"\"\n\nimport re\n\nstrings = open('C:\\\\Users\\\\Administrator\\\\Desktop\\\\wordZ.txt').readlines()\nst = [sti.split('\\n')[0] for sti in strings ]\n\nfor i in st:\n print ('——',i,'——')\n \n\n\n","sub_path":"demo_test/demo0824.py","file_name":"demo0824.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287162054","text":"#!/usr/bin/env python\n \n \nimport time\nimport serial\n \nser = serial.Serial(port='/dev/ttyS0',baudrate = 9600,parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS,timeout=1)\ncounter=0\n \n \nwhile 1:\n ser.write(str.encode('Write counter: %d \\n'%(counter)))\n time.sleep(1)\n counter += 
1\n","sub_path":"write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"585260582","text":"from django.contrib.auth.models import User\nfrom adventure.models import Player, Room\nimport random\n\n\nRoom.objects.all().delete()\n# instantiate list of lists (list of rows) for the initial 15x15 grid filled w/ None objects in each position\nworld_matrix = [[None for _ in range(15)] for _ in range(15)]\n# pick a random point in the grid, place a room there, and implement random walk algo to build rooms\nstart_x = random.choice(range(15))\nstart_y = random.choice(range(15))\n# current x and y vars for each new room creation\nx = start_x\ny = start_y\n# initialize room counter at 1\nroom_count = 1\n# instantiate first room at the starting position and save to the db\nroom = Room(title=f'room{room_count}',\n description=f'desc{room_count}', x=x, y=y)\nroom.save()\n# update the world_matrix position at x and y w/ the newly created room object\nworld_matrix[y][x] = room\n\n# loop thru the random walk until 100 rooms have been created\nwhile room_count < 100:\n directions = ['n', 's', 'e', 'w']\n reverse_map = {'n': 's', 's': 'n', 'e': 'w', 'w': 'e'}\n # get possible directions\n if room.x == 0: # left edge of grid, cant go west\n directions.remove('w')\n elif room.x == 14: # right edge, cant go east\n directions.remove('e')\n if room.y == 0: # top edge of grid, cant go north\n directions.remove('n')\n elif room.y == 14: # bottom edge, cant go south\n directions.remove('s')\n # random choice from poss directions\n direction = random.choice(directions)\n if direction == 'n':\n # its minus because rows go top to bottom in the matrix (0, 0 is upper left corner)\n y -= 1\n elif direction == 's':\n y += 1\n elif direction == 'e':\n x += 1\n else: # west\n x -= 1\n prev_room = room\n # if no room exists in that direction:\n if world_matrix[y][x] is None:\n # increment room_count, instantiate the room, save it to the db, and update the world_matrix at x and y\n room_count += 1\n room = Room(title=f'room{room_count}',\n description=f'desc{room_count}', x=x, y=y)\n room.save()\n world_matrix[y][x] = room\n else: # room already there, just update current room with the room in that position\n room = world_matrix[y][x]\n # update the connections\n prev_room.connectRooms(room, direction)\n room.connectRooms(prev_room, reverse_map[direction])\n\nplayers = Player.objects.all()\nfor p in players:\n p.currentRoom = world_matrix[start_y][start_x].id\n p.save()\n\n\n#### Old code for creatingthe 10x10 grid of fully connected rooms ####\n\n# #start at 0,0 iterate across until x reaches 10, then move up one row\n# room_coords = []\n# for i in range(10):\n# \tfor j in range(10):\n# \t\troom_coords.append((j,i))\n\n# room_num = 1\n# rooms = []\n# for coord in room_coords:\n# \troom = Room(title = f'room{room_num}', description = f'desc{room_num}', x=coord[0], y=coord[1])\n# \troom.save()\n# \trooms.append(room)\n# \troom_num += 1\n\n# ### Link rooms together\n\n# for room in rooms:\n# \tif room.y < 9: #add connection to the north\n# \t\troom_above = rooms[(room.id + 9) % 100]\n# \t\troom.connectRooms(room_above, 'n')\n# \tif room.y > 0: #add connection to the south\n# \t\troom_below = rooms[(room.id - 11) % 100]\n# \t\troom.connectRooms(room_below, 's')\n# \tif room.x > 0: #add connection to the west\n# \t\troom_left = rooms[(room.id -2) % 100]\n# \t\troom.connectRooms(room_left, 'w')\n# \tif room.x < 9: 
#add connection to the east\n# \t\troom_right = rooms[room.id % 100]\n# \t\troom.connectRooms(room_right, 'e')\n","sub_path":"util/create_rooms_grid.py","file_name":"create_rooms_grid.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"44006101","text":"from timing import basic_timer, full_profile, line_profile\n\n\ndef get_number():\n for x in range(500000):\n yield x\n\n\n@basic_timer\ndef basic():\n for x in get_number():\n i = x ^ x ^ x\n\nprint(\"\\n***BASIC***\\n\")\nbasic()\nprint(\"\\n\")\n\n\n@full_profile\ndef full():\n for x in get_number():\n i = x ^ x ^ x\n\nprint(\"\\n***FULL***\\n\")\nfull()\nprint(\"\\n\")\n\n\n@line_profile(follow=[get_number])\ndef line_by_line():\n for x in get_number():\n i = x ^ x ^ x\n\nprint(\"\\n***BY LINE***\\n\")\nc = line_by_line()\nprint(\"\\n\")\n","sub_path":"timing_demo.py","file_name":"timing_demo.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"35976665","text":"\"\"\"\nParser: local process\n Role: access the data queue locally\n References:\n https://docs.python.org/2/library/multiprocessing.html\n Credit for:\n https://github.com/ssepulveda/RTGraph\n\"\"\"\nimport signal, time\nimport multiprocessing as mp\n\n\nclass Parser(mp.Process):\n\n def __init__(self, data, samples):\n mp.Process.__init__(self)\n self._importQ = mp.Queue()\n self._sample = samples\n self._exportQ = data\n self.count = 0\n self._exit = mp.Event()\n\n def check_init(self):\n return self.name is not None\n\n def run(self):\n while not self._exit.is_set():\n self._get_data()\n time.sleep(0.005)\n self._get_data()\n\n def stop(self):\n self._exit.set()\n\n # Collect raw data in _importQ\n def add(self, data):\n self._importQ.put(data)\n\n # Export data from serial port to worker\n # Raw data from Serial/Simulator process is collected in _importQ\n # Helper function: parse_data\n # Output queue: _exportQ\n def _get_data(self):\n while not self._importQ.empty():\n queue = self._importQ.get(timeout=0.05)\n self.parse_data(queue[0], queue[1])\n\n # Convert and Export data in _importQ to _exportQ\n def parse_data(self, t, line):\n if len(line) > 0:\n try:\n if type(line) == bytes:\n values = line.decode(\"UTF-8\").split(',')\n elif type(line) == str:\n values = line.split(',')\n else:\n raise TypeError\n values = [float(v) for v in values]\n self._exportQ.put((t, values))\n except ValueError:\n print('Value Error')\n except AttributeError:\n print('Attribute Error')\n\n\n\n\n\n\n\n\n\n","sub_path":"processes/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360323958","text":"# coding = utf-8\nfrom sklearn.svm import SVC\nimport pandas as pd\nfrom pandas import DataFrame\ndef predict(data,label, testdata, testlabel,batchsize,testbatchsize):\n acclist = []\n checklist =[]\n timestep = len(data)/batchsize\n # find a classifier on the first batch\n clf = SVC()\n clf.fit(data[0:batchsize ,:], label[0:batchsize])\n acclist.append(clf.score(testdata[0:batchsize ,:], testlabel[0:batchsize]))\n for t in range(1, timestep):\n batchdata = testdata[t*testbatchsize : (t+1)*testbatchsize]\n batchtarget = testlabel[t*testbatchsize : (t+1)*testbatchsize]\n # choose error as change indicator\n acc = clf.score(batchdata, batchtarget)\n # print(\"%d error %.1f %%\"%(t, 
err*100))\n acclist.append(acc)\n checklist.append(acc)\n return acclist\n\nif __name__ == '__main__':\n dataframe = pd.read_table('datasource/seadata.txt', sep=',', header=None).values\n data = dataframe[:, :-1]\n label= dataframe[:, -1]-1\n testframe = pd.read_table('datasource/seatest.txt', sep=',', header=None).values\n testdata = testframe[:, :-1]\n testlabel = testframe[:, -1] - 1\n svm = predict(data, label, testdata, testlabel)\n csvwriter = DataFrame(svm, columns=['svm'])\n csvwriter.to_csv('Result/svm.csv', index=False, header=False)","sub_path":"CDs/SVM_only.py","file_name":"SVM_only.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615551533","text":"import requests\nimport csv\n\nimport bs4\nimport expanddouban\n\"\"\"\nreturn a string corresponding to the URL of douban movie lists given category and location.\n\"\"\"\ndef getMovieUrl(category, location):\n\turl = \"https://movie.douban.com/tag/#/?sort=S&range=9,10&tags=电影,{},{}\".format(category,location)\n\treturn url\nprint(getMovieUrl(\"剧情\",\"美国\"))\n\n\n\"\"\"\nurl: the douban page we will get html from\nloadmore: whether or not click load more on the bottom\nwaittime: seconds the broswer will wait after intial load and\n\"\"\"\ndef getHtml(url, loadmore = True, waittime = 2):\n browser = webdriver.Chrome('chromedriver')\n browser.get(url)\n time.sleep(waittime)\n if loadmore:\n while True:\n try:\n next_button = browser.find_element_by_class_name(\"more\")\n next_button.click()\n time.sleep(waittime)\n except:\n break\n html = browser.page_source\n browser.quit()\n return html\n\n\n\ndef getMovies(category, location):\n url = getMovieUrl(category, location)\n html = expanddouban.getHtml(url, True)\n soup = bs4.BeautifulSoup(html, \"html.parser\")\n content_div = soup.find(\"div\",class_=\"list-wp\")\n movie_name = []\n for element in content_div.find_all(\"a\", recursive=False):\n if element.p.span:\n movie_name.append(element.p.span.get_text())\n return movie_name\n\nclass Movie(object):\n\t\"\"\"docstring forMovie.\"\"\"\n\tdef __init__(self, name, rate, location, category, info_link, cover_link):\n\t\t# superMovie, self).__init__()\n\t\tself.name = name\n\t\tself.rate = rate\n\t\tself.location = location\n\t\tself.category = category\n\t\tself.info_link = info_link\n\t\tself.cover_link = cover_link\nm = Movie(\"肖申克的救赎\",\"9.6\",\"美国\",\"剧情\",\"https://movie.douban.com/subject/1292052/\",\"https://img3.doubanio.com/view/movie_poster_cover/lpst/public/p480747492.jpg\")\n\ndef getLocations(category):\n\turl = \"https://movie.douban.com/tag/#/?sort=S&range=9,10&tags=电影,{}\".format(category)\n\thtml = expanddouban.getHtml(url)\n\tsoup = bs4.BeautifulSoup(html,\"html.parser\")\n\tlocations = []\n\tcontent_ul = soup.find(\"div\",class_=\"tags\").ul.find_next_sibling(\"ul\").find_next_sibling(\"ul\")\n\tfor child in content_ul.children:\n\t\tlocations.append(child.span.get_text())\n\treturn locations[1:]\n\nmy_favorite_categories = [\"科幻\",\"动作\",\"青春\"]\nnum_movies = 0\n\n\ndef myFavoriteMovies(categories):\n\tnum_movies = 0\n\tname_movies = []\n\tfor category in categories:\n\t\tfor location in getLocations(category):\n\t\t\turl = getMovieUrl(category,location)\n\t\t\thtml = expanddouban.getHtml(url, True)\n\t\t\tsoup = bs4.BeautifulSoup(html, \"html.parser\")\n\t\t\tcontent_div = soup.find(\"div\",class_=\"list-wp\")\n\t\t\tfor element in content_div.find_all(\"a\", recursive=False):\n\t\t\t\tif 
element.p.span:\n\t\t\t\t\tname = element.p.span.get_text()\n\t\t\t\t\trate = element.p.span.find_next_sibling(\"span\").get_text()\n\t\t\t\t\tinfo_link = element.get('href')\n\t\t\t\t\tcover_link = element.img.get('src')\n\t\t\t\t\tif name not in name_movies:\n\t\t\t\t\t\tname_movies.append(name)\n\t\t\t\t\t\tcreateVar = locals()\n\t\t\t\t\t\tcreateVar['movie'+str(num_movies)] = Movie(name, rate, location, category, info_link, cover_link)\n\t\t\t\t\t\tnum_movies += 1\n\twith open('movies.csv', 'w', newline='') as csvfile:\n\t\tspamwriter = csv.writer(csvfile, dialect='excel')\n\t\tfor num in range(len(name_movies)):\n\t\t\tspamwriter.writerow([createVar['movie'+str(num)].name, createVar['movie'+str(num)].rate, createVar['movie'+str(num)].location, createVar['movie'+str(num)].category, createVar['movie'+str(num)].info_link, createVar['movie'+str(num)].cover_link])\n\t\t\tprint(num)\n\n\nwith open('movies.csv', 'r') as f:\n reader = csv.reader(f)\n movies = list(reader)\n\n\ndef count_num_of_category(movies,category):\n\tcreateVar = locals()\n\tcreateVar['num_'+str(category)] = 0\n\n\tfor movie in movies:\n\t\tif movie[3] == category:\n\t\t\tcreateVar['num_'+str(category)] += 1\n\n\treturn createVar['num_'+str(category)]\n\n\ndef count_num_of_category_location(movies,category,location):\n\tcreateVar = locals()\n\tcreateVar['num_'+str(category)+str(location)] = 0\n\n\tfor movie in movies:\n\t\tif movie[3] == category and movie[2] == location:\n\t\t\tcreateVar['num_'+str(category)+str(location)] += 1\n\n\treturn createVar['num_'+str(category)+str(location)]\n\n\nresults_for_output = []\ndef find_topthree_locations(movies,category):\n\tlocations = getLocations(category)\n\tlocation_dict = {}\n\tnum_list = []\n\tnum = 0\n\n\tfor location in locations:\n\t\tnum = count_num_of_category_location(movies,category,location)\n\t\tnum_list.append(num)\n\n\tnum_list = sorted(num_list)\n\n\tfor location in locations:\n\t\tnum = count_num_of_category_location(movies,category,location)\n\t\tlocation_dict[location] = num\n\n\treturn sorted(location_dict,key=lambda x:location_dict[x])[-1], sorted(location_dict,key=lambda x:location_dict[x])[-2], sorted(location_dict,key=lambda x:location_dict[x])[-3]\n\n\ndef find_topthree_locations_in_my_favorite_categories(movies,categories):\n\tfor category in categories:\n\t\tcreateVar = locals()\n\t\tcreateVar[\"top_three_in_\"+str(category)] = find_topthree_locations(movies,category)\n\t\tprint(\"在{}类电影中,数量排名前三的地区分别为{}、{}、{}。\".format(category,createVar[\"top_three_in_\"+str(category)][0],createVar[\"top_three_in_\"+str(category)][1],createVar[\"top_three_in_\"+str(category)][2]))\n\n\ndef count_percentage(movies,category):\n\tlocations = getLocations(category)\n\tcreateVar = locals()\n\ttop_three_locations = []\n\n\tcreateVar[\"top_three_in_\"+str(category)] = find_topthree_locations(movies,category)\n\tfor n in range(3):\n\t\ttop_three_locations.append(createVar[\"top_three_in_\"+str(category)][n])\n\n\tresults_for_output.append(\"在{}类电影中,数量排名前三的地区分别为{}、{}、{}。\".format(category,top_three_locations[0],top_three_locations[1],top_three_locations[2]))\n\n\tfor loca in top_three_locations:\n\t\tnum_loca = count_num_of_category_location(movies,category,loca)\n\t\tnum_cate = count_num_of_category(movies,category)\n\t\tpercentage = float(num_loca/num_cate)\n\t\tpercentage = round(percentage,2)*100\n\t\tresults_for_output.append(\"{}占{}类别的电影的百分比为{}%。\".format(loca,category,percentage))\n\n\ndef count_all_percentages(movies,categories):\n\tfor category in categories:\n\t\t 
count_percentage(movies,category)\n\ncount_all_percentages(movies,my_favorite_categories)\n\nf = open(\"output.txt\",'w')\nfor n in range(len(results_for_output)):\n\tf.write(results_for_output[n])\n\tf.write('\\n')\n\nf.close()\n","sub_path":"best movie/bestmovie.py","file_name":"bestmovie.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"508750130","text":"\"\"\"\nCreated by Sanjay at 9/14/2018\n\nFeature:\nImplementation of Apriori - Association Rule Mining algorithm\n\"\"\"\nimport sys\nimport os\nfrom itertools import chain, combinations\nfrom collections import defaultdict\nfrom sys import exit\n\ndef subsets(arr):\n return chain (*[combinations (arr, i + 1) for i, a in enumerate (arr)])\n\n\ndef generate_freq_items(itemSet, transactionList, minSupport, freqSet):\n _itemSet = set ()\n localSet = defaultdict (int)\n\n for item in itemSet:\n for transaction in transactionList:\n if item.issubset (transaction):\n freqSet[item] += 1\n localSet[item] += 1\n\n for item, count in localSet.items ():\n support = float (count) / len (transactionList)\n\n if support >= minSupport:\n _itemSet.add (item)\n\n return _itemSet\n\n\ndef merge_set(itemSet, length):\n return set ([i.union (j) for i in itemSet for j in itemSet if len (i.union (j)) == length])\n\n\ndef get_transaction_list(data_iterator):\n transactionList = list ()\n itemSet = set ()\n for record in data_iterator:\n transaction = frozenset (record)\n transactionList.append (transaction)\n for item in transaction:\n itemSet.add (frozenset ([item])) # Generate 1-itemSets\n return itemSet, transactionList\n\n\ndef generate_association_rules(data_iter, minSupport, minConfidence):\n itemSet, transactionList = get_transaction_list (data_iter)\n\n freqSet = defaultdict (int)\n largeSet = dict ()\n\n assocRules = dict ()\n\n oneCSet = generate_freq_items (itemSet,\n transactionList,\n minSupport,\n freqSet)\n\n currentLSet = oneCSet\n k = 2\n while (currentLSet != set ([])):\n largeSet[k - 1] = currentLSet\n currentLSet = merge_set (currentLSet, k)\n currentCSet = generate_freq_items (currentLSet,\n transactionList,\n minSupport,\n freqSet)\n currentLSet = currentCSet\n k = k + 1\n\n def get_support(item):\n return float (freqSet[item]) / len (transactionList)\n\n items_to_return = []\n for key, value in largeSet.items ():\n items_to_return.extend ([(tuple (item), get_support (item))\n for item in value])\n\n rules_to_return = []\n for key, value in list (largeSet.items ())[1:]:\n for item in value:\n _subsets = map (frozenset, [x for x in subsets (item)])\n for element in _subsets:\n remain = item.difference (element)\n if len (remain) > 0:\n confidence = get_support (item) / get_support (element)\n if confidence >= minConfidence:\n rules_to_return.append (((tuple (element), tuple (remain)),\n confidence))\n return items_to_return, rules_to_return\n\n\ndef read_csv(filename):\n file_iter = open (filename, 'r')\n for line in file_iter:\n line = line.strip ().rstrip (',') # Remove trailing comma\n record = frozenset (line.split (','))\n yield record\n\n\ndef print_freq_items(outdir, result):\n with open (outdir + os.sep + 'Output' + os.sep + 'frequent_itemset_result.txt', 'w') as f:\n for items in result:\n items = items[0]\n output_str = '{'\n for e in items:\n output_str += e\n output_str += ','\n\n output_str = output_str[:-1]\n output_str += '}\\n'\n f.write (output_str)\n\n\ndef print_rules(outdir, rules):\n with open (outdir + os.sep + 
'Output' + os.sep + 'assoc-rule-result.txt', 'w') as f:\n for rule in rules:\n rule = rule[0]\n rule_left_side = rule[0]\n rule_right_side = rule[1]\n output_str = '{'\n for e in rule_left_side:\n output_str += e\n output_str += ','\n output_str = output_str[:-1]\n output_str += '} => {'\n for e in rule_right_side:\n output_str += e\n output_str += ','\n output_str = output_str[:-1]\n output_str += '}\\n'\n f.write (output_str)\n\n\nif __name__ == '__main__':\n\n datafilepath = r'C:\\Users\\Sanjay Saha\\CS5228-assignments\\assignment-1\\Association Rule Mining\\Data\\Groceries100.csv'\n # outdir = r'C:\\Users\\Sanjay Saha\\CS5228-assignments\\assignment-1\\Association Rule Mining'\n outdir = '.'\n\n if len (sys.argv) != 3 and len (sys.argv) != 4:\n print (\"Wrong command format, please follwoing the command format below:\")\n print (\"python assoc-rule-miner-template.py csv_filepath minsup\")\n print (\"python assoc-rule-miner-template.py csv_filepath minsup minconf\")\n exit (0)\n\n transactions = None\n minsup = 0.0\n minconf = 0.0\n if len (sys.argv) == 3:\n transactions = read_csv (sys.argv[1])\n minsup = float (sys.argv[2])\n minconf = 0.3\n elif len (sys.argv) == 4:\n transactions = read_csv (sys.argv[1])\n minsup = float (sys.argv[2])\n minconf = float (sys.argv[3])\n\n items, rules = generate_association_rules (transactions, minsup, minconf)\n\n if len (sys.argv) == 3:\n # Output frequent item-sets\n print_freq_items(outdir, items)\n elif len (sys.argv) == 4:\n # Output Rules to File\n print_rules(outdir, rules)\n\n","sub_path":"assignment-1/Association Rule Mining/assoc-rule-miner-template.py","file_name":"assoc-rule-miner-template.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606843386","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\n\ndef decision():\n data = pd.read_csv(\"http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt\")\n print(type(data))\n x=data[['pclass','age','sex']]\n x['age'].fillna(x['age'].mean(),inplace=True)\n x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.25)\n dict = DictVectorizer()\n x_train = dict.fit_transform(x_train.to_dict(orient='records'))\n x_test = dict.fit_transform(x_test.to_dict(orient='records'))\n # dtc = DecisionTreeClassifier()\n # dtc.fit(x_train,y_train)\n # y_precidt=dtc.predict(x_test)\n # print(y_precidt)\n # print(np.mean(y_test==y_precidt))\n # print(dtc.score(x_test,y_test))\n\n rfc = RandomForestClassifier()\n parser = {'n_estimators':[100,200,300,400,500],'max_depth':[3,6,9,12,16]}\n gc = GridSearchCV(estimator=rfc,param_grid=parser,cv=2)\n gc.fit(x_train,y_train)\n print('准确率为:',gc.score(x_test,y_test))\n print('交叉验证中最好测试结果:', gc.best_score_)\n print('最好参数模型:', gc.best_estimator_)\n print('每次后的训练集的准确率结果:', gc.cv_results_)\nif __name__ == '__main__':\n decision()","sub_path":"ML_01/decision_tree/simply.py","file_name":"simply.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"256799956","text":"from setuptools import setup\nimport socket\nimport glob\n\nhostname = socket.gethostname().lower()\n\nif hostname == 'hera-snap-head':\n scripts = 
['scripts/hera_setup_new_node.py', 'scripts/hera_node_get_status.py',\n 'scripts/hera_upload_meta_to_redis.py']\nelif hostname == 'hera-corr-head':\n scripts = ['scripts/hera_node_get_status.py', 'scripts/hera_upload_meta_to_redis.py']\nelse:\n scripts = glob.glob('scripts/*')\n\nsetup(\n name='node_control',\n description='A node monitor and control interface',\n license='BSD',\n author='David DeBoer',\n author_email='ddeboer@berkeley.edu',\n url='https://github.com/hera_team/hera_node_mc.git',\n long_description=open('README.md').read(),\n package_dir={'node_control': 'node_control'},\n packages=['node_control'],\n scripts=scripts,\n use_scm_version=True,\n install_requires=['redis']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"245554665","text":"#coding=utf-8\n'''\nCreate on Dec 09,2015\n@author chenr\nods.hpf_contract_fix->edw.ht_ownership_his\n'''\n\nimport datetime\n\nfrom common.pgcomm import PGUtils\nfrom common.compare import cmp_update\nfrom common.sqlcomm import formatSql\nfrom settings import logger\n\nfrom transform.transcomm import MODE_FULL,MODE_INCREASE\n\nsource_flag = '03' #源系统标识\nSYS_LOG_TYPE_TRANSFORM = '001' #默认日志类型\n\ntruncate_sql = '''\nTRUNCATE TABLE edw.ht_ownership_his\n'''\n#全量转化sql\ntrans_sql = '''\nINSERT INTO edw.ht_ownership_his (\n source_id,\n batch_id,\n contract_id,\n contract_type,\n owner_id,\n br_id,\n subbranch_id\n)\nSELECT \n %s,\n %s,\n trim(contractID),\n trim(contractType),\n trim(ownerID),\n trim(brID),\n trim(subBranchID)\nFROM ods.hpf_contract_fix\n'''\n\ninsert_sql = '''\nINSERT INTO edw.ht_ownership_his (\n source_id,\n batch_id,\n contract_id,\n contract_type,\n owner_id,\n br_id,\n subbranch_id\n)VALUES(\n %s, %s, %s, %s, %s, %s, %s\n)\n'''\n\nselect_src_sql = '''\nSELECT \n %s,\n %s,\n trim(contractID),\n trim(contractType),\n trim(ownerID),\n trim(brID),\n trim(subBranchID)\nFROM ods.hpf_contract_fix\nWHERE createDate >= %s\n'''\n\nselect_cur_value_src = '''\nSELECT\n to_char(createdate,'YYYYMMDD')\nFROM\n ods.hpf_contract_fix\nWHERE \n createdate IS NOT NULL\nORDER BY \n createdate DESC\nLIMIT 1\n'''\n#全量转化\ndef full_trans(conn,batch_id):\n with conn.cursor() as cur:\n logger.info('执行清空目标表edw.ht_ownership_his')\n logger.debug(formatSql(truncate_sql))\n cur.execute(truncate_sql)\n logger.info('执行全量转化ods.hpf_contract_fix')\n logger.debug(formatSql(trans_sql)%(source_flag, batch_id))\n cur.execute(trans_sql, (source_flag, batch_id))\n \ndef sub_trans(conn,batch_id,cur_value_desc):\n with conn.cursor() as cur:\n logger.info('查询需要增量的记录')\n logger.debug(formatSql(select_src_sql)%(source_flag, batch_id, cur_value_desc,)) \n cur.execute(select_src_sql, (source_flag, batch_id, cur_value_desc,))\n logger.info('检查选定字段值以确定加载模式(新增或更新)')\n for record in cur:\n columnname = (\"contract_type\",\"owner_id\",\"br_id\",\"subbranch_id\",)\n cmp_update(conn, record[3:7], 'edw.ht_ownership_his', 'contract_id', record[2], insert_sql, columnname, record)\n \ndef transform(mode, batch_id, cur_value_desc):\n start = datetime.datetime.now()\n logger.info('start...合同历史关系归属表ods.hpf_contract_fix->edw.ht_ownership_his转化开始')\n pgObj = PGUtils()\n conn = pgObj.getConnection()\n try:\n with conn:\n if mode == MODE_FULL: #全量转化\n logger.info('当前模式:全量转化')\n full_trans(conn, batch_id)\n \n elif mode == MODE_INCREASE: #增量转化\n logger.info('当前模式:增量转化,目标表当前值%s'%cur_value_desc)\n sub_trans(conn, batch_id, cur_value_desc)\n 
finally:\n conn.close()\n end = datetime.datetime.now()\n logger.info('end...ods.hpf_contract_fix->edw.ht_ownership_his转化完成,耗时%s'%(end-start).seconds)\n \n return True\ndef get_last_update():\n pgObj = PGUtils()\n conn = pgObj.getConnection()\n #获取本表最后更新时间\n try:\n with conn:\n cur = conn.cursor()\n cur.execute(select_cur_value_src)\n cur_value_src = cur.fetchone()\n if cur_value_src:\n return cur_value_src[0]\n else:\n return ''\n finally:\n conn.close()\n\n","sub_path":"etl/datadeal/scripts/transform/trans_hpf_contract_fix.py","file_name":"trans_hpf_contract_fix.py","file_ext":"py","file_size_in_byte":3713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326058740","text":"import glob\nimport csv\nimport sys\nimport os\nimport codecs\nfrom collections import defaultdict\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nprint(os.getcwd())\n\np = '/Users/makidaisuke/Desktop/auto//before/*'\npath = glob.glob(p)\n\n# CSVファイルの読み込み\nfor f in path:\n user_list = []\n staff_list = []\n date_list = []\n\n print(f)\n with codecs.open(f, 'r', 'Shift-JIS', 'ignore') as csv_file:\n rows = csv.DictReader(csv_file)\n for row in rows:\n user = row['利用者名']\n staff = row['スタッフ名']\n date = row['サービス日付']\n user_list.append(user.replace('\\u3000', ''))\n staff_list.append(staff.replace('\\u3000', ''))\n date_list.append(date)\n\n first_date = date_list[0].replace('-', '_')\n last_date = date_list[-1].replace('-', '_').replace('2020_', '')\n\n d = defaultdict(list)\n for k, v in zip(user_list, staff_list):\n if v not in d[k] and not v == '':\n \n d[k].append(v)\n\n write_path = ('/Users/makidaisuke/Desktop/auto/after/' + first_date + '~' + last_date + '.csv')\n\n with open(write_path, mode='w', encoding='Shift_JIS') as f:\n fieldnames = ['利用者名', 'スタッフ名']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n for key, value in d.items():\n writer.writerow({'利用者名': key, 'スタッフ名': value})\n\nprint('完了しました')\ninput('終了')","sub_path":"kanamic.py","file_name":"kanamic.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"265685683","text":"def countMatches(grid1, grid2):\n count = 0\n\n # bfs in-place, set visited as 0\n for i in range(len(grid1)):\n for j in range(len(grid1[0])):\n result1 = bfs(grid1, i, j)\n result2 = bfs(grid2, i, j)\n\n print(result1)\n print(result2)\n\n if match(result1, result2):\n count += 1\n\n return count\n\n\ndef bfs(grid, row, col):\n if grid[row][col] == 0:\n return []\n\n result = []\n\n import collections\n queue = collections.deque()\n queue.append((row, col))\n\n while queue:\n\n cell_row, cell_col = queue.popleft()\n # print(cell_row, cell_col)\n\n if cell_row >= 0 and cell_row < len(grid) and cell_col >= 0 and cell_col < len(grid[0]):\n cell = grid[cell_row][cell_col]\n else:\n continue\n\n if cell == 0:\n continue\n else:\n grid[cell_row][cell_col] = 0\n result.append((cell_row, cell_col))\n\n queue.append((cell_row - 1, cell_col))\n queue.append((cell_row, cell_col - 1))\n queue.append((cell_row + 1, cell_col))\n queue.append((cell_row, cell_col + 1))\n\n # print(queue)\n\n return result\n\n\ndef match(result1, result2):\n if len(result1) != len(result2):\n return False\n if not result1 or not result2:\n return False\n\n # print(result1, result2)\n for r1, r2 in zip(result1, result2):\n # print(r1, r2)\n if r1[0] != r2[0] or r1[1] != r2[1]:\n return False\n\n return True\n\n\ng1 = [\n [1, 0, 1, 1],\n 
[1, 0, 0, 1],\n [1, 0, 1, 0]\n]\ng2 = [\n [1, 0, 1, 1],\n [1, 0, 0, 1],\n [1, 0, 1, 0]\n]\n\nprint(countMatches(g1, g2))\n","sub_path":"twitter/countMatches.py","file_name":"countMatches.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176782259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, print_function, absolute_import,\n unicode_literals)\n\n__all__ = [\"Model\"]\n\nimport numpy as np\n\n\nclass Model(object):\n \"\"\"\n A likelihood wrapper that combines a generative model and datasets to\n ...\n\n \"\"\"\n\n def __init__(self, planetary_system, datasets=[], parameters=[],\n priors=[]):\n self.planetary_system = planetary_system\n self.datasets = datasets\n self.parameters = parameters\n self.lnpriors = priors\n\n @property\n def vector(self):\n return np.concatenate(map(np.atleast_1d,\n [p.get() for p in self.parameters]))\n\n @vector.setter\n def vector(self, values):\n ind = 0\n for p in self.parameters:\n if len(p):\n p.set(values[ind:ind+len(p)])\n ind += len(p)\n else:\n p.set(values[ind])\n ind += 1\n\n def __call__(self, p):\n self.vector = p\n return self.lnprob()\n\n def lnprob(self):\n lp = self.lnprior()\n if not np.isfinite(lp):\n return -np.inf\n try:\n ll = self.lnlike()\n except RuntimeError:\n return -np.inf\n if not np.isfinite(ll):\n return -np.inf\n return lp + ll\n\n def lnprior(self):\n lp = (0.0 if self.planetary_system is None\n else self.planetary_system.lnprior())\n if not np.isfinite(lp):\n return -np.inf\n pp = [l() for l in self.lnpriors]\n if not np.all(np.isfinite(pp)):\n return -np.inf\n ppar = [p.lnprior() for p in self.parameters]\n if not np.all(np.isfinite(ppar)):\n return -np.inf\n return lp + np.sum(pp) + np.sum(ppar)\n\n def lnlike(self):\n return np.sum([d.lnlike(self.planetary_system\n .light_curve(d.time, texp=d.texp, tol=d.tol,\n maxdepth=d.maxdepth))\n for i, d in enumerate(self.datasets)])\n\n\nclass VariationalModel(Model):\n\n def __init__(self, **kwargs):\n super(VariationalModel, self).__init__(None, **kwargs)\n self.systems = []\n\n def add_dataset_system(self, dataset, system):\n self.systems.append((dataset, system))\n\n def lnlike(self):\n return np.sum([d.lnlike(s.light_curve(d.time, texp=d.texp, tol=d.tol,\n maxdepth=d.maxdepth))\n for i, (d, s) in enumerate(self.systems)])\n","sub_path":"bart/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"93152176","text":"#%%\nimport numpy as np\nimport cv2\n\nim_grasp = cv2.imread('./dataset/aff/hammer1_beige_000_grasp_0label.png')\nim_pound = cv2.imread('./dataset/aff/hammer1_beige_000_pound_2label.png')\nim_hand = cv2.imread('./dataset/aff/hammer1_beige_000_hand_1label.png')\nim_aff = cv2.imread('./dataset/aff/hammer1_beige_000_grasp_0label.png')\n\n#%%\ngreen = np.array([0, 255, 0])\nblue = np.array([255, 0, 0])\nwhite = np.array([255, 255, 255])\naff = cv2.imread('./dataset/aff/hammer1_beige_000_rgb.png')\ngrasp = cv2.imread('./dataset/aff/hammer1_beige_000_rgb.png')\npound = cv2.imread('./dataset/aff/hammer1_beige_000_rgb.png')\nhand = cv2.imread('./dataset/aff/hammer1_beige_000_rgb.png')\n\n#%%\nh, w, _ = im_pound.shape\nfor i in range(h):\n for j in range(w):\n if im_pound[i, j, 0] == 255 and im_pound[i, j, 1] == 0:\n im_pound[i, j] = [0, 0, 255]\n \n if im_aff[i, j, 2] == 255 and im_aff[i, j, 1] == 255 and 
im_aff[i, j, 0] == 255:\n            im_aff[i, j] = [0, 0, 255]\n\n\na = cv2.addWeighted(grasp, 0.6, im_grasp, 0.4, 0)\nb = cv2.addWeighted(pound, 0.6, im_pound, 0.4, 0)\nc = cv2.addWeighted(hand, 0.6, im_hand, 0.4, 0)\nd = cv2.addWeighted(aff, 0.6, im_aff, 0.4, 0)\ncv2.imwrite('./dataset/aff/a.png', a)\ncv2.imwrite('./dataset/aff/b.png', b)\ncv2.imwrite('./dataset/aff/c.png', c)\ncv2.imwrite('./dataset/aff/d.png', d)\n\n\n#%%\nimport numpy as np\nimport cv2\nimport glob\n\n\n\n#%%\n\np = cv2.imread('./dataset/s/ladle2_green_060_task1.png')\n\nh, w, _ = p.shape\n\nfor i in range(h):\n    for j in range(w):\n        if p[i, j, 0] == 255 and p[i, j, 1] == 255 and p[i, j, 2] == 255:\n            p[i, j] = [0, 255, 255]\n        \ncv2.imwrite('./dataset/s/s2.png', p)\n\n#%%\n","sub_path":"make_pics.py","file_name":"make_pics.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"494996681","text":"# %load q02_best_k_features/build.py\n# Default imports\n\nimport pandas as pd\n\ndata = pd.read_csv('data/house_prices_multivariate.csv')\n\nfrom sklearn.feature_selection import SelectPercentile, f_regression, SelectFromModel\n\ndef percentile_k_features(df, k=20):\n    selector = SelectPercentile(f_regression, percentile=k)\n    X,y = df.iloc[:,:-1], df.iloc[:,-1]\n    selector.fit(X,y)\n    idx_selected = selector.get_support(indices=True)\n    idx_sorted = [idx_selected for _, idx_selected in sorted(zip(selector.scores_[idx_selected], idx_selected), reverse=True)]\n    features_train = df.iloc[:,idx_sorted]\n    return list(features_train.columns.values)\n\n\n\n","sub_path":"q02_best_k_features/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"207320034","text":"import unittest\nfrom char_histogram import char_histogram\n\nclass TestCharHistogram(unittest.TestCase):\n\tdef test_result_of_function_with_expected_result(self):\n\t\t#ARRANGE\n\t\ttext = \"PytthonP\"\n\t\texpected = {'P':2, 'y': 1, 't':2, 'h':1, 'o':1, 'n':1}\n\n\t\t#ACT\n\t\tresult = char_histogram(text)\n\n\t\t#ASSERT\n\t\tself.assertEqual(result, expected)\n\tdef test_histogram_with_an_empty_string(self):\n\t\t#ARRANGE\n\t\ttext = \"\"\n\t\texpected = {}\n\n\t\t#ACT\n\t\tresult = char_histogram(text)\n\n\t\t#ASSERT\n\t\tself.assertEqual(result, expected)\n\tdef test_histogram_with_object_that_is_not_string(self):\n\t\t#ARRANGE\n\t\ttext = 32\n\t\texpected = {'3':1, '2':1}\n\n\t\t#ACT\n\t\tresult = char_histogram(text)\n\n\t\t#ASSERT\n\t\tself.assertEqual(result, expected)\n\nif __name__ == '__main__':\n\tunittest.main()","sub_path":"week1/lab1/char_histogram/test_char_histogram.py","file_name":"test_char_histogram.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"45928900","text":"import conversions\nimport unittest\n\nclass KnownValues(unittest.TestCase):\n    known_values_ctk = ((100.0,373.15),\n                        (60.0,333.15),\n                        (20.0,293.15),\n                        (0.0,273.15))\n    known_values_ctf = ((100.0,212.0),\n                        (60.0,140.0),\n                        (20.0,68.0),\n                        (0.0,32.0))\n    known_values_ftk = ((212.0,373.15),\n                        (140.0,333.15),\n                        (68.0,293.15),\n                        (32.0,273.15))\n    known_values_ftc = ((212.0,100.0),\n                        (140.0,60.0),\n                        (68.0,20.0),\n                        (32.0,0.0))\n    known_values_ktf = ((373.15,212.0),\n                        (333.15,140.0),\n                        (293.15,68.0),\n                        (273.15,32.0))\n    known_values_ktc = ((373.15,100.0),\n                        (333.15,60.0),\n                        (293.15,20.0),\n                        (273.15,0.0))\n\n\n    def test_conversions_known_values_ctk(self):\n        \"\"\"convertCelsiusToKelvin should give known values with known input\"\"\"\n        for celsius, kelvin in self.known_values_ctk:\n            result = conversions.convertCelsiusToKelvin(celsius)\n            self.assertEqual(kelvin, result)\n\n\n    def test_conversions_known_values_ctf(self):\n        \"\"\"convertCelsiusToFahrenheit should give known values with known input\"\"\"\n        for celsius, fahrenheit in self.known_values_ctf:\n            result = conversions.convertCelsiusToFahrenheit(celsius)\n            self.assertEqual(fahrenheit, result)\n\n\n    def test_conversions_known_values_ftk(self):\n        \"\"\"convertFahrenheitToKelvin should give known values with known input\"\"\"\n        for fahrenheit, kelvin in self.known_values_ftk:\n            result = conversions.convertFahrenheitToKelvin(fahrenheit)\n            self.assertEqual(kelvin, result)\n\n\n    def test_conversions_known_values_ftc(self):\n        \"\"\"convertFahrenheitToCelsius should give known values with known input\"\"\"\n        for fahrenheit, celsius in self.known_values_ftc:\n            result = conversions.convertFahrenheitToCelsius(fahrenheit)\n            self.assertEqual(celsius, result)\n\n\n    def test_conversions_known_values_ktf(self):\n        \"\"\"convertKelvinToFahrenheit should give known values with known input\"\"\"\n        for kelvin, fahrenheit in self.known_values_ktf:\n            result = conversions.convertKelvinToFahrenheit(kelvin)\n            self.assertEqual(fahrenheit, result)\n\n\n    def test_conversions_known_values_ktc(self):\n        \"\"\"convertKelvinToCelsius should give known values with known input\"\"\"\n        for kelvin, celsius in self.known_values_ktc:\n            result = conversions.convertKelvinToCelsius(kelvin)\n            self.assertEqual(celsius, result)\n\n\nif __name__ == '__main__':\n    unittest.main()\n\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"50523557","text":"import os.path as osp\n\nimport torch\n\nfrom torch_geometric.datasets import QM9\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.nn import DimeNet\n\nfrom utils.utils_functions import get_n_params\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'QM9')\ndataset = QM9(path)\n\n# DimeNet uses the atomization energy for targets U0, U, H, and G.\nidx = torch.tensor([0, 1, 2, 3, 4, 5, 6, 12, 13, 14, 15, 11])\ndataset.data.y = dataset.data.y[:, idx]\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nif __name__ == '__main__':\n    for target in range(12):\n        # Skip target \\delta\\epsilon, since it can be computed via\n        # \\epsilon_{LUMO} - \\epsilon_{HOMO}.\n        if target == 4:\n            continue\n\n        model, datasets = DimeNet.from_qm9_pretrained(path, dataset, target)\n        with open(\"dimeNetParams.txt\", \"w\") as f:\n            n_param, details = get_n_params(model)\n            f.write(\"number of parameters: {}\\n\".format(n_param))\n            for line in details.split(\"\\n\"):\n                f.write(line + \"\\n\")\n        exit()\n        train_dataset, val_dataset, test_dataset = datasets\n\n        model = model.to(device)\n        loader = DataLoader(test_dataset, batch_size=32)\n\n        maes = []\n        for data in loader:\n            data = data.to(device)\n            with torch.no_grad():\n                pred = model(data.z, data.pos, data.batch)\n            mae = (pred.view(-1) - data.y[:, target]).abs()\n            maes.append(mae)\n\n        mae = torch.cat(maes, dim=0)\n\n        # Report meV instead of eV.\n        mae = 1000 * mae if target in [2, 3, 4, 6, 7, 8, 9, 10] else mae\n\n        print(f'Target: {target:02d}, MAE: {mae.mean():.5f} ± 
{mae.std():.5f}')\n","sub_path":"deprecated_code/qm9dimenetTest.py","file_name":"qm9dimenetTest.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566521322","text":"\nimport torch.nn as nn\nimport torchvision\nimport numpy as np\nimport logging\nimport argparse\nimport os\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom torchvision.models import resnet18\n\nlog_dir = '/home/hackathon/hackathon/Code/'\n\nos.chdir(log_dir)\n\nimport utils\nimport evaluate\nimport data_loader\n\n\ndef train(model, optimizer, loss_fn, dataloader, params, img_count, cuda_present):\n \"\"\"Train the model on `num_steps` batches\n Args:\n model: (torch.nn.Module) the neural network\n optimizer: (torch.optim) optimizer for parameters of model\n loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch\n dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data\n metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch\n params: (Params) hyperparameters\n num_steps: (int) number of batches to train on, each of size params.batch_size\n \"\"\"\n \n # set model to training mode\n model.train()\n\n # summary for current training loop and a running average object for loss\n epoch_metric_summ = []\n summary_batch = {}\n \n epoch_metrics = {}\n \n loss_avg = utils.RunningAverage()\n loss_class_wts = torch.tensor(params.wts, dtype=torch.float32)\n \n threshold = params.threshold #threshold value above which a class is considered present\n \n y_pred = torch.zeros(img_count, params.class_count)\n y_true = torch.zeros(img_count, params.class_count)\n \n if cuda_present:\n loss_class_wts = loss_class_wts.cuda()\n k= 0\n \n for i, (train_batch, labels_batch) in enumerate(dataloader):\n \n batch_size = labels_batch.size()[0] \n \n #print(i, batch_size)\n y_true[k:k+ batch_size, :] = labels_batch #build entire array of predicted labels\n\n #If CUDA available, move data to GPU\n if cuda_present:\n train_batch = train_batch.cuda() #async=True)\n labels_batch = labels_batch.cuda() #async=True)\n \n # convert to torch Variables\n train_batch, labels_batch = Variable(train_batch), Variable(labels_batch)\n\n # compute model output and loss\n prim_out = model(train_batch)\n\n #Compute primary, Aux and total weighted loss\n loss =loss_fn(prim_out, labels_batch)\n\n #send the primary output after thresholding for metrics calc\n yp = ((prim_out > threshold).int()*1).cpu()\n y_pred[k:k+ batch_size, :] = yp #build entire array of predicted labels\n k += batch_size\n\n # clear previous gradients, compute gradients of all variables wrt loss\n optimizer.zero_grad()\n loss.backward()\n\n # performs updates using calculated gradients\n optimizer.step()\n\n # update the average loss\n loss_avg.update(loss.item())\n \n #Calculate the metrics of the entire training dataset\n #epoch_metrics = metrics(y_pred, y_true, threshold)\n epoch_metrics['loss'] = loss_avg()\n \n logging.info(\"Training error {}\".format(epoch_metrics['loss']))\n \n \ndef train_and_evaluate(params, dataloader, optimizer, scheduler, loss_fn, log_dir, cuda_present):\n \"\"\"Train the model and evaluate every epoch.\n Args:\n model: (torch.nn.Module) the neural network\n train_dataloader: (DataLoader) a torch.utils.data.DataLoader object that fetches training data\n val_dataloader: 
(DataLoader) a torch.utils.data.DataLoader object that fetches validation data\n optimizer: (torch.optim) optimizer for parameters of model\n loss_fn: a function that takes batch_output and batch_labels and computes the loss for the batch\n metrics: (dict) a dictionary of functions that compute a metric using the output and labels of each batch\n params: (Params) hyperparameters\n model_dir: (string) directory containing config, weights and log\n restore_file: (string) optional- name of file to restore from (without its extension .pth.tar)\n \"\"\"\n \n best_val_acc = 10000000 \n \n for epoch in range(params.num_epochs):\n \n t0 = time.time()\n '''Do the following for every epoch'''\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch + 1, params.num_epochs))\n \n train_image_dict = dataloader.load_data(\"train\", params)\n train_labels_dict = dataloader.load_labels(\"train\", params)\n train_img_count = len(train_image_dict)\n train_data_generator = dataloader.data_iterator(params, \"train\", train_image_dict, train_labels_dict)\n \n train_data_generator = dataloader.data_iterator(params, \"train\", train_image_dict, train_labels_dict)\n \n # compute number of batches in one epoch (one full pass over the training set)\n #train(model, optimizer, loss_fn, train_data_generator, params, train_img_count, cuda_present)\n\n # Evaluate for one epoch on validation set\n val_image_dict = dataloader.load_data(\"val\", params)\n val_labels_dict = dataloader.load_labels(\"val\", params)\n val_img_count = len(val_image_dict)\n val_data_generator = dataloader.data_iterator(params, \"val\", val_image_dict, val_labels_dict)\n (val_metrics, y_true, y_pred) = evaluate.evaluate(model, loss_fn, val_data_generator, params, val_img_count, cuda_present)\n \n val_acc = val_metrics['loss']\n is_best = val_acc < best_val_acc\n\n logging.info(\"y_true {}\".format(y_true))\n logging.info(\"y_pred {}\".format(y_pred))\n \n best_file_name = 'train3_resnet18_eval'\n \n if (is_best):\n best_val_acc = val_acc\n utils.save_checkpoint({'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optim_dict' : optimizer.state_dict()},\n is_best,\n log_dir, best_file_name)\n t1 = time.time()\n logging.info(\"Time taken for this epoch = {}\".format(t1-t0))\n logging.info(\"Validation error after epoch {}, {}\".format(epoch, val_acc))\n \n return (y_true, y_pred)\n \nif __name__ == \"__main__\":\n \n \n print('first line in main')\n \n mymodel = resnet18(pretrained= True)\n mymodel.fc = nn.Linear(512, 1) \n modelpath = '/home/hackathon/hackathon/Code/logs/best_weights_train3_resnet18'\n checkpoint = torch.load(modelpath)\n mymodel.load_state_dict(checkpoint['state_dict'])\n \n log_dir = '/home/hackathon/hackathon/Code/logs'\n json_path = '/home/hackathon/hackathon/Code/params3.json'\n # Set the logger\n utils.set_logger(os.path.join(log_dir, 'train3_resnet18_eval.log'))\n\n #Read params file\n params = utils.Params(json_path)\n\n #Generate Dataloader\n logging.info(\"Generating the dataloader\")\n dataloader = data_loader.Dataloader(params) \n logging.info(\"Done loading the Dataloader\")\n\n # use GPU if available\n cuda_present = torch.cuda.is_available() #Boolean\n\n if cuda_present:\n logging.info(\"using CUDA\")\n else:\n logging.info(\"cuda not available, using CPU\")\n\n logging.info(\"Loading model and weights\")\n\n # Change the following 1 lines for new models\n #model = net.myDensenet169(model_dir, params.class_count)\n model = mymodel\n logging.info(\"Transferring model to GPU if CUDA available\")\n for param 
in model.parameters():\n        param.requires_grad = True\n    if cuda_present:\n        model = model.cuda()\n\n    # Specify the loss function and Optimizer\n    optimizer = optim.Adam(model.parameters(), lr = params.learning_rate)\n    scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=3, gamma=0.7)\n    \n    #loss_fn = nn.BCEWithLogitsLoss() # moving to net.py\n    loss_fn = nn.MSELoss()\n\n    # Train and Evaluate the model\n    logging.info(\"Starting training for {} epoch(s)\".format(params.num_epochs))\n    #train_and_evaluate(params, image_dict, labels_dict)\n    (y_true, y_pred) = train_and_evaluate(params, dataloader, optimizer, scheduler, loss_fn, log_dir, cuda_present)\n","sub_path":"train3_resnet18.py","file_name":"train3_resnet18.py","file_ext":"py","file_size_in_byte":8242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"566521322","text":"import numpy as np\nimport math\nfrom scipy import special\n\n\nclass Variogram_Wrapper:\n    \"\"\"\n    The Variogram Wrapper should be used to decorate the mathematical expression of the variogram function. The typical\n    signature is\n\n    func(h, *args)\n\n    When decorated by Variogram_Wrapper, func will accept iterables as well and will be called in a list comprehension.\n    The Wrapper preserves the original __name__ attribute of the function.\n\n    \"\"\"\n\n    def __init__(self, func):\n        \"\"\"\n\n        :param func:\n        \"\"\"\n        self.func = func\n        self.__name__ = func.__name__\n\n    def __call__(self, *args, **kwargs):\n        \"\"\"\n\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        if hasattr(args[0], '__iter__'):\n            # this is an iterable\n            new_args = args[1:]\n            return np.array([self.func(value, *new_args, **kwargs) for value in args[0]])\n#            return np.fromiter(map(self.func, args[0], *new_args, **kwargs), dtype=np.float)\n        else:\n            return self.func(*args, **kwargs)\n\n# --- generic theoretical Variogram Models --- #\n\n\n@Variogram_Wrapper\n# if numba is installed uncomment\n# @jit\ndef spherical(h, a, C0, b=0):\n    \"\"\"\n    The Spherical variogram function.\n\n    For the function definition see:\n    Burgess, T. M., & Webster, R. (1980). Optimal interpolation and isarithmic mapping of soil properties. I.\n    The semi-variogram and punctual kriging. Journal of Soil and Science, 31(2), 315–331, 7 figs, 1 table, 27 refs.\n    http://doi.org/10.1111/j.1365-2389.1980.tb02084.x\n\n    :param h: the separation lag\n    :param a: the range parameter (not effective range!)\n    :param C0: the Variogram sill\n    :param b: the Variogram nugget\n\n    :return: float, or list of; the semivariance at separation lag h\n    \"\"\"\n    # prepare parameters\n    r = a / 1.\n    C0 -= b\n\n    if h <= a:\n        return b + C0 * ((1.5 * (h / r)) - (0.5 * ((h / r)**3.0)))\n    else:\n        return b + C0\n\n
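# NOTE (illustrative, not part of the original module): at h == a the spherical model reaches\n# its sill exactly, e.g. spherical(10., 10., 2.) == 2.0, and stays flat at b + C0 for any h > a.\n\n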
@Variogram_Wrapper\ndef exponential(h, a, C0, b=0):\n    \"\"\"\n    The Exponential variogram function.\n\n    :param h: the separation lag\n    :param a: the range parameter (not effective range!)\n    :param C0: the Variogram sill\n    :param b: the Variogram nugget\n\n    :return: float, or list of; the semivariance at separation lag h\n    \"\"\"\n    # prepare parameters\n    r = a / 3.\n    C0 -= b\n\n    try:\n        return b + C0 * (1. - math.exp(-(h / r)))\n    except:\n        return b + C0\n\n@Variogram_Wrapper\n# if numba is installed uncomment\n# @jit\ndef gaussian(h, a, C0, b=0):\n    \"\"\"\n    The Gaussian variogram function.\n\n    :param h: the separation lag\n    :param a: the range parameter (not effective range!)\n    :param C0: the Variogram sill\n    :param b: the Variogram nugget\n\n    :return: float, or list of; the semivariance at separation lag h\n    \"\"\"\n    # prepare parameters\n    r = a / np.sqrt(3)\n    C0 -= b\n\n    return b + C0 * (1. - math.exp(- (h ** 2 / r ** 2)))\n\n\n@Variogram_Wrapper\n# if numba is installed uncomment\n# @jit\ndef cubic(h, a, C0, b=0):\n    \"\"\"\n    The Cubic Variogram function\n\n    :param h: the separation lag\n    :param a: the range parameter (not effective range!)\n    :param C0: the Variogram sill\n    :param b: the Variogram nugget\n    \"\"\"\n    # prepare parameters\n    C0 -= b\n\n    if h <= a:\n        return b + C0 * ( (7*(h**2 / a**2)) - ((35/4)*(h**3/a**3)) + ((7/2)*(h**5/a**5)) - ((3/4)*(h**7/a**7)) )\n    else:\n        return b + C0\n\n\n@Variogram_Wrapper\n# if numba is installed uncomment\n# @jit\ndef stable(h, a, C0, s, b=0):\n    \"\"\"\n    The Stable Variogram function.\n\n    :param h:\n    :param a:\n    :param C0:\n    :param s:\n    :param b:\n    :return:\n    \"\"\"\n    # prepare parameters\n    r = a * math.pow(3, 1 / s)\n    C0 -= b\n\n    if s > 2:\n        s = 2\n    return b + C0 * (1. - math.exp(- math.pow(h / r, s)) )\n\n\n@Variogram_Wrapper\n# if numba is installed uncomment\n# @jit\ndef matern(h, a, C0, s, b=0):\n    \"\"\"\n    The Matérn model.\n\n    For Matérn function see:\n    Minasny, B., & McBratney, A. B. (2005). The Matérn function as a general model for soil variograms.\n    Geoderma, 128(3–4 SPEC. ISS.), 192–207. http://doi.org/10.1016/j.geoderma.2005.04.003.\n\n    :param h: lag\n    :param a: range\n    :param C0: sill\n    :param s: smoothness parameter\n    :param b: nugget\n    :return:\n    \"\"\"\n    # prepare parameters\n    r = a\n    C0 -= b\n\n    return b + C0 * (1 - ( (1 / (np.power(2, s - 1) * special.gamma(s))) * np.power(h / r, s) * special.kv(s, h / r) ))\n\n\n# --- Adaptions using no nugget effect --- #\ndef debug_spherical(h, a, C0):\n    if isinstance(h, list) or isinstance(h, np.ndarray):\n        return np.array([debug_spherical(_, a, C0) for _ in h])\n    else:\n        if h <= a:\n            return C0 * ((1.5*(h/a)) - (0.5*((h/a)**3.0)))\n        else:\n            return C0","sub_path":"skgstat/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"583274398","text":"import boto3\nimport base64\nimport json\nimport pulumi_azure as azure\nfrom typing import Dict\n\n\ndef get_secret() -> Dict:\n    \"\"\"Obtain the secrets in the AWS Secrets Manager\n    Returns:\n        {\n            \"PULUMI\": {\n                \"SERVICE_BUS_RESOURCE_GROUP\": str,\n                \"SERVICE_BUS_NAMESPACE\": str,\n                \"DEADLETTER_WATCHER_ENDPOINT\": str,\n                \"SERVICE_BUS_QUEUES\":[ str ]\n            },\n            \"DL-WATCHER\": {\n                \"SLACK_CHANNEL\": str,\n                \"BOT_TOKEN\": str\n            }\n        }\n    \"\"\"\n    session = boto3.session.Session()\n    client = session.client(service_name='secretsmanager',\n                            region_name=\"eu-west-2\")\n\n    get_secret_value_response = client.get_secret_value(\n        SecretId=\"deadletter-watcher-secrets\")\n\n    if 'SecretString' in get_secret_value_response:\n        return get_secret_value_response['SecretString']\n    else:\n        decoded_binary_secret = base64.b64decode(\n            get_secret_value_response['SecretBinary'])\n        return decoded_binary_secret\n\n
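# Illustrative note (not from the original module): get_secret() hands back the raw\n# SecretString, so a caller would typically json.loads() it before indexing, e.g.\n# json.loads(get_secret())['PULUMI']['SERVICE_BUS_NAMESPACE'].\n\n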
def get_service_bus_connection(namespace, resource_group):\n    \"\"\"\n    Retrieves the service bus connection string for a given namespace and resource group\n    Args:\n        namespace:\n        resource_group:\n\n    Returns:\n        service bus connection string.\n    \"\"\"\n    sb = azure.servicebus.get_namespace(name=namespace,\n                                        resource_group_name=resource_group)\n    return sb.default_primary_connection_string\n\n\ndef update_secret(sb_list):\n    \"\"\"\n    Updates secrets in AWS based on service bus list details\n    Args:\n        sb_list:\n\n    Returns:\n\n    \"\"\"\n    if sb_list:\n        service_bus_details = json.loads(sb_list)\n        for sb in service_bus_details:\n            sb_conn = get_service_bus_connection(sb['namespace'], sb['resourceGroup'])\n            sb['connection_str'] = sb_conn\n            print(\"successfully retrieved connection str\")\n            print(sb_conn)\n        print(service_bus_details)\n\n","sub_path":"event_trigger/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"260924689","text":"from django.shortcuts import render\r\nfrom .models import DesktopSoftware, MobileSoftware\r\nfrom home.views import Banner\r\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\r\nfrom .forms import DesktopForm\r\n\r\n\r\nclass DesktopSoftwareListView(Banner, ListView):\r\n    model = DesktopSoftware\r\n    template_name = 'desktop.html'\r\n    context_object_name = 'apps'\r\n    ordering = ['-date_posted']\r\n\r\n\r\nclass DesktopSoftwareDetailView(Banner, DetailView):\r\n    model = DesktopSoftware\r\n    template_name = 'desktop-detail.html'\r\n\r\n\r\nclass DesktopSoftwareCreateView(CreateView):\r\n    model = DesktopSoftware\r\n    form_class = DesktopForm\r\n    template_name = 'create-form.html'\r\n\r\n    def form_valid(self, form):\r\n        form.instance.author = self.request.user\r\n        return super().form_valid(form)\r\n\r\n\r\nclass DesktopSoftwareUpdateView(Banner, UpdateView):\r\n    model = DesktopSoftware\r\n    form_class = DesktopForm\r\n    template_name = 'create-form.html'\r\n\r\n    def form_valid(self, form):\r\n        form.instance.author = self.request.user\r\n        return super().form_valid(form)\r\n\r\n    def test_func(self):\r\n        post = self.get_object()\r\n        if self.request.user == post.author:\r\n            return True\r\n        return False\r\n","sub_path":"software/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"355764827","text":"# Group 91 crawler improvement stage\r\n# Lesson 1: exercises on how crawlers and browsers work\r\n\r\n# 1. Task:\r\n# Use requests to crawl the article and mine a rich vein of literary theory,\r\n# then use the open method to write the fetched data into the file data.txt\r\n# URL: https://baijiahao.baidu.com/s?id=1661382527708632196&wfr=spider&for=pc\r\n\r\nimport requests as r\r\n\r\ntxt_url = 'https://baijiahao.baidu.com/s?id=1661382527708632196&wfr=spider&for=pc'\r\nres = r.get(txt_url)\r\nf = open('data.txt','w')\r\nf.write(res.text)\r\nf.close()\r\n\r\n# 2. Task:\r\n# Use requests to fetch the Baidu logo image and save it with the open method\r\n# Hints:\r\n# 1. Use the open function to write the image to a local file (any path is fine)\r\n# 2. wb means writing in binary mode\r\n# URL: https://www.baidu.com/img/bd_logo1.png\r\n\r\npic_url = 'https://www.baidu.com/img/bd_logo1.png'\r\nres = r.get(pic_url)\r\nf = open('pic.png','wb')\r\nf.write(res.content)\r\nf.close()","sub_path":"01/Crawler01_Exercise.py","file_name":"Crawler01_Exercise.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"156906713","text":"from datetime import datetime\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom random import randrange\nimport time\n\nclass Plot:\n    def __init__(self):\n
        self.x_data, self.y_data = [], []\n        self.figure = plt.figure()\n        self.line, = plt.plot_date(self.x_data, self.y_data, '-')\n        self.animation = None\n\n    def __update(self, frame):\n        self.line.set_data(self.x_data, self.y_data)\n        self.figure.gca().relim()\n        self.figure.gca().autoscale_view()\n\n    def update(self, x, y, set_new=False):\n        if self.animation is None:\n            self.animation = FuncAnimation(self.figure, self.__update, interval=200)\n            plt.show()\n        if not set_new:\n            self.x_data.append(x)\n            self.y_data.append(y)\n        else:\n            self.x_data = x\n            self.y_data = y\n\n","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"117498971","text":"from flask import render_template, url_for, redirect\nfrom flask_login import login_required\n\nfrom app import db\nfrom app.models import Genre\nfrom app.genre import bp\nfrom .forms import AddGenreForm, EditGenreForm\n\n@bp.route('/')\n@login_required\ndef genre_list():\n    ''' A route for a list of all the genres in the collection. '''\n    genres = Genre.query.all()\n    return render_template('genre_list.html', title = 'Genres', genres = genres)\n\n@bp.route('/add', methods = ['GET', 'POST'])\n@login_required\ndef genre_add():\n    ''' A route for showing and processing a form for adding a new genre. '''\n    form = AddGenreForm()\n\n    # When the form has been processed with no errors, save the new genre to database\n    if form.validate_on_submit():\n        genre = Genre()\n        form.populate_obj(obj=genre)\n        db.session.add(genre)\n        db.session.commit()\n        # Once the new genre has been saved, return back to the view of all genres\n        return redirect(url_for('genre.genre_list'))\n    return render_template('genre_add.html', form = form, title = 'Add genre')\n\n@bp.route('/<int:id>')\n@login_required\ndef genre_details(id):\n    ''' A route to display details for a specific genre, for the given id. '''\n    genre = Genre.query.get_or_404(id)\n    return render_template('genre_details.html', title = 'Genre details', genre = genre)\n\n@bp.route('/<int:id>/delete')\n@login_required\ndef genre_delete(id):\n    ''' A route that deletes a genre for the given id. '''\n\n    genre = Genre.query.get_or_404(id)\n    db.session.delete(genre)\n    db.session.commit()\n\n    # Once the genre record has been deleted, return back to the list of the genres\n    return redirect(url_for('genre.genre_list'))\n\n@bp.route('/<int:id>/edit', methods = ['GET', 'POST'])\n@login_required\ndef genre_edit(id):\n    ''' A route for displaying and processing a form, when editing a genre. 
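Handles GET by rendering the form and POST by validating and saving the submitted changes. 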
'''\n \n genre = Genre.query.get_or_404(id)\n form = EditGenreForm(obj=genre)\n\n # When the form has been completed correctly, the changes to the genre are saved\n if form.validate_on_submit():\n form.populate_obj(obj=genre)\n db.session.commit()\n # Once the changes have been saved in database, show the view of the details for the genre\n return redirect(url_for('genre.genre_details', id = genre.id))\n \n # When the request is a GET or there are errors in the form, return the view with the form\n return render_template('genre_edit.html', title = 'Edit genre', form = form, genre = genre)","sub_path":"app/genre/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439998622","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\n\nclass ItemAvaliacaoManager(models.Manager):\n\n\tdef get_items_avaliacao(self, projeto_id, cliente_id):\n\t\t\n\t\tfrom django.db import connection\n\t\t\n\t\tcursor = connection.cursor()\n\t\t\n\t\tcursor.execute(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tia.id as item_avaliativo_id,\n\t\t\t\tia.nome as nome,\n\t\t\t\t\n\t\t\t\tcoalesce (\n\t\t\t\t(select i.id from core_avaliacao a right join core_itemavaliacao i on i.avaliacao_id = a.id\n\t\t\t\twhere item_avaliativo_id = ia.id and a.projeto_id = p.id and a.cliente_id= pc.cliente_id), 0\n\t\t\t\t) as item_avaliacao_id,\n\t\t\t\t\n\t\t\t\tcoalesce (\n\t\t\t\t(select resposta from core_avaliacao a right join core_itemavaliacao i on i.avaliacao_id = a.id\n\t\t\t\twhere item_avaliativo_id = ia.id and a.projeto_id = p.id and a.cliente_id= pc.cliente_id), 0\n\t\t\t\t) as resposta\n\t\t\t\n\t\t\tfrom core_projeto p\n\t\t\tinner join core_projeto_grupo_avaliativo gap on gap.projeto_id = p.id \n\t\t\tinner join core_grupoavaliativo ga on ga.id = gap.grupoavaliativo_id\n\t\t\tinner join core_itemavaliativo ia on ia.grupo_avaliativo_id = ga.id \n\t\t\tinner join core_projeto_cliente pc on pc.projeto_id = p.id\n\t\t\twhere p.id = %d and pc.cliente_id = %d\n\t\t\t\"\"\" % (projeto_id, cliente_id)\n\t\t)\n\n\t\tresult = []\n\n\t\tfor row in cursor.fetchall():\n\t\t\titem_avaliacao = None\n\t\t\tif int(row[2]) > 0:\n\t\t\t\titem_avaliacao = self.model.objects.get(pk=int(row[2]))\n\t\t\t\n\t\t\tif item_avaliacao:\n\t\t\t\tresult.append(item_avaliacao)\n\t\t\telse:\n\t\t\t\titem_avaliativo = self.model.item_avaliativo.get_query_set().get(pk=int(row[0])) \n\t\t\t\titem_avaliacao = self.model(item_avaliativo=item_avaliativo, resposta=0)\n\t\t\t\tresult.append(item_avaliacao)\n\n\t\treturn result\n\n\nclass AvaliacaoManager(models.Manager):\n\n\tdef get_resume_by_project(self, projeto_id):\n\n\t\tfrom django.db import connection\n\n\t\tcursor = connection.cursor()\n\n\t\tcursor.execute(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\ti.id as item_avaliativo_id,\n\t\t\t\ti.nome as nome,\n\t\t\t\ta.projeto_id,\n\t\t\t\tcoalesce (\n\t\t\t\t(select count(*) \n\t\t\t\tfrom core_avaliacao ca right join core_itemavaliacao ci on ci.avaliacao_id = ca.id\n\t\t\t\twhere \n\t\t\t\tci.item_avaliativo_id = i.id and\n\t\t\t\ta.projeto_id=ca.projeto_id and\n\t\t\t\tci.resposta = 1), 0\n\t\t\t\t) as ruim,\n\n\t\t\t\tcoalesce (\n\t\t\t\t(select count(*) \n\t\t\t\tfrom core_avaliacao ca right join core_itemavaliacao ci on ci.avaliacao_id = ca.id\n\t\t\t\twhere \n\t\t\t\tci.item_avaliativo_id = i.id and\n\t\t\t\ta.projeto_id=ca.projeto_id and\n\t\t\t\tci.resposta = 2), 0\n\t\t\t\t) as regular,\n\n\t\t\t\tcoalesce (\n\t\t\t\t(select 
count(*) \n\t\t\t\tfrom core_avaliacao ca right join core_itemavaliacao ci on ci.avaliacao_id = ca.id\n\t\t\t\twhere \n\t\t\t\tci.item_avaliativo_id = i.id and\n\t\t\t\ta.projeto_id=ca.projeto_id and\n\t\t\t\tci.resposta = 3), 0\n\t\t\t\t) as bom,\n\t\t\t\tcoalesce (\n\t\t\t\t(select count(*) \n\t\t\t\tfrom core_avaliacao ca right join core_itemavaliacao ci on ci.avaliacao_id = ca.id\n\t\t\t\twhere \n\t\t\t\tci.item_avaliativo_id = i.id and\n\t\t\t\ta.projeto_id=ca.projeto_id and\n\t\t\t\tci.resposta = 4), 0\n\t\t\t\t) as otimo\n\t\t\tfrom core_avaliacao a\n\t\t\tinner join core_itemavaliacao ia on ia.avaliacao_id = a.id\n\t\t\tinner join core_itemavaliativo i on i.id = ia.item_avaliativo_id\n\t\t\twhere a.projeto_id= %d\n\t\t\tgroup by i.id,a.projeto_id\n\t\t\torder by a.projeto_id\"\"\" % projeto_id\n\t\t)\n\t\t\n\t\tresumo = cursor.fetchall()\n\n\t\treturn resumo","sub_path":"avaliacao/core/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228955254","text":"from datetime import datetime\r\nfrom random import randint\r\nimport time\r\nodds = [1, 3, 5, 7, 9, 11,\r\n 13, 15, 17, 19, 21,\r\n 23, 25, 27, 29, 31, 33,\r\n 35, 37, 39, 41, 43,\r\n 45, 47, 49, 51, 53, 55, 57, 59]\r\nfor i in range(5):\r\n right_this_minute = datetime.today().minute\r\n if right_this_minute in odds:\r\n print('This is minute seems a little odd')\r\n else:\r\n print('Not an odd minute.')\r\n wait = randint(1, 60)\r\n time.sleep(wait)","sub_path":"from books/learn prog. with python/odd2.py","file_name":"odd2.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111850265","text":"#!flask/bin/python\nimport os\nfrom datetime import datetime\nfrom . 
import utils\n\nfrom flask import Flask, abort, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' \\\n                                        + os.path.join(basedir, 'app.db')\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\ndb = SQLAlchemy(app)\nfrom service.models import *\n\n\n# Task 1\n@app.route('/imports', methods=['POST'])\ndef create_import():\n    new_import = DataImport(date_created=datetime.now())\n    db.session.add(new_import)\n    db.session.commit()\n    new_import = db.session.query(DataImport).order_by(\n        DataImport.id.desc()).first()\n    data = request.get_json()\n    relations = {}\n    a = []\n    try:\n        for citizen in data['citizens']:\n            if citizen['citizen_id'] in a:\n                abort(400)\n            a.append(citizen['citizen_id'])\n    except KeyError:\n        abort(400)\n\n    for citizen in data['citizens']:\n        if Citizen.validate(citizen, True):\n            new_citizen = Citizen(\n                citizen_id=citizen['citizen_id'],\n                town=citizen['town'],\n                street=citizen['street'],\n                building=citizen['building'],\n                apartment=citizen['apartment'],\n                name=citizen['name'],\n                birth_date=datetime.strptime(citizen['birth_date'], '%d.%m.%Y'),\n                gender=citizen['gender'],\n                import_id=new_import.id)\n            relations[citizen['citizen_id']] = citizen['relatives']\n            db.session.add(new_citizen)\n        else:\n            abort(400)\n\n    for first in relations:\n        for second in relations[first]:\n            if first not in relations[second]:\n                print('Wrong c2c relations')\n                abort(400)\n\n    db.session.commit()\n    for cit_id in relations.keys():\n        citizen = db.session.query(Citizen).filter_by(citizen_id=cit_id,\n                                                      import_id=new_import.id).first()\n        for second in relations[cit_id]:\n            relative = db.session.query(Citizen).filter_by(\n                citizen_id=second, import_id=new_import.id).first()\n            citizen.add_relative(relative)\n    db.session.commit()\n    result = {'data': {\n        'import_id': new_import.id\n    }}\n\n    return jsonify(result), 201\n\n\n# Task 2\n@app.route('/imports/<int:import_id1>/citizens/<int:citizen_id1>',\n           methods=['PATCH'])\ndef patch_import(import_id1, citizen_id1):\n    if not isinstance(import_id1, int) or import_id1 < 0 or not isinstance(\n            citizen_id1, int) or citizen_id1 < 0:\n        abort(400)\n    data = request.get_json()\n    citizen = db.session.query(Citizen).filter_by(import_id=import_id1,\n                                                  citizen_id=citizen_id1).first()\n    if citizen is None:\n        abort(404)\n    if Citizen.validate(data, False):\n        citizen.update(data)\n        result = {'data': citizen.serialize()}\n    else:\n        abort(400)\n    return jsonify(result), 200\n\n\n# Task 3\n@app.route('/imports/<int:import_id1>/citizens', methods=['GET'])\ndef get_import(import_id1):\n    if not isinstance(import_id1, int) or import_id1 < 0:\n        print(\"Failed to get import by id:\", import_id1)\n        abort(400)\n    data_import = db.session.query(Citizen).filter_by(\n        import_id=import_id1).all()\n    if len(data_import) == 0:\n        print(\"Failed to get import by id:\", import_id1)\n        abort(400)\n    result = {'data': []}\n    for citizen in data_import:\n        result['data'].append(citizen.serialize())\n    return jsonify(result), 200\n\n\n# Task 4\n@app.route('/imports/<int:import_id1>/citizens/birthdays', methods=['GET'])\ndef get_birthdays(import_id1):\n    if not isinstance(import_id1, int) or import_id1 < 0:\n        abort(404)\n    presents = {key: list() for key in range(1, 13)}\n    data_import = db.session.query(Citizen).filter_by(\n        import_id=import_id1).all()\n    if len(data_import) == 0:\n        abort(404)\n    for citizen in data_import:\n        months = {key: int() for key in range(1, 13)}\n        for rel in citizen.relatives.all():\n            months[rel.birth_date.month] += 1\n
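        # months now maps month number -> how many of this citizen's relatives were born in that month\n        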
for month in months.keys():\n            pres = {'citizen_id': citizen.citizen_id,\n                    'presents': months[month]}\n            if pres['presents'] != 0:\n                presents[month].append(pres)\n\n    result = {}\n    for i in presents.keys():\n        result[str(i)] = presents[i]\n\n    return dict(data=result), 200\n\n\n# Task 5\n@app.route(\n    '/imports/<int:import_id1>/towns/stat/percentile/age', methods=['GET'])\ndef get_statistics(import_id1):\n    if not isinstance(import_id1, int) or import_id1 < 0:\n        print(\"Failed to get import by id:\", import_id1)\n        abort(400)\n    citizens = db.session.query(Citizen).filter_by(\n        import_id=import_id1).all()\n\n    if len(citizens) == 0:\n        print(\"Failed to get import by id:\", import_id1)\n        abort(400)\n    towns = list(set([citizen.town for citizen in citizens]))\n    data = [dict(town=town, ages=list()) for town in towns]\n\n    for citizen in citizens:\n        birth_date = citizen.birth_date\n        age = utils.calculate_age(birth_date)\n        town_statistics = [item for item in data if\n                           item['town'] == citizen.town][0]\n        index = data.index(town_statistics)\n        town_statistics['ages'].append(age)\n        data[index] = town_statistics\n\n    for town_statistics in data:\n        ages = town_statistics.pop('ages')\n        for percent in [50, 75, 99]:\n            town_statistics[f'p{percent}'] = utils.percentile(ages, percent)\n\n    return dict(data=data), 200\n","sub_path":"service/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"211838751","text":"import os\nimport json\n# import pandas as pd\nfrom datetime import datetime\nfrom src.main.pre_work import Work\nfrom lib.file_management.extract import unzipfile\nfrom src.main.student_data import StudentData\nfrom lib.file_management.configeditor import ConfigEditor\nfrom lib.function_network.func_network import CallApi\nfrom lib.file_management.createapikeyfile import SaveApiKey\nfrom lib.cli_displayed.dis_cli import display_typo\n\n\ndef check_config(path):\n    if not os.path.exists(os.path.join(path, \"ta\", \"config.json\")):\n        return False\n    else:\n        return True\n\n\ndef check_draft(path):\n    if not os.path.exists(os.path.join(path, \"ta\", \"draft.json\")) and not SaveApiKey().exsitapikey():\n        return False\n    else:\n        return True\n\n\ndef check_state(config_state, draft_state, path):\n    if config_state and draft_state:\n        return True\n    else:\n        display_typo(1, (config_state and draft_state), \"Property is not ready please try again\",\n                     optional_massage=f\"CONFIG : {config_state} / DRAFT : {draft_state} / API-KEY : {SaveApiKey().exsitapikey()}\")\n        print(\"[*]\")\n        return False\n\n\ndef preparework(path):\n    config_state = check_config(path)\n    draft_state = check_draft(path)\n    display_typo(1, config_state, \"checking config.json\")\n    display_typo(1, draft_state, \"checking draft.json\")\n\n    if not check_state(config_state, draft_state, path):\n        return False\n    return True\n\n\ndef draft_config(path):\n    print(\"Do you want to use draft from draft.json or fetch from the server\")\n    while True:\n        user_in = input(\"(R)ead from file or (F)etch from server: \")\n        if user_in.lower() in \"RrFf\":\n            break\n    if user_in.lower() == \"f\":\n        draft = CallApi(path).fetch()\n        print(draft)\n    else:\n        with open(os.path.join(path, \"ta\", \"draft.json\"), \"r\") as draftfile:\n            draft = json.load(draftfile)\n            draftfile.close()\n    return draft\n\n\ndef add_data_to_work(path, draft):\n    work = Work()\n    work.draft = draft\n    work.path = path\n    work.workId = ConfigEditor(path=path).readconfig()[\"workId\"]\n    if work.property_is_ready():\n
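        # draft, path and workId are all present, so persist ta/work.json to disk\n        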
work_path = os.path.join(path, \"ta\", \"work.json\")\n if work.create():\n print(f\" |-[/] {work_path} created\")\n else:\n print(f\" |-[X] {work_path} already exists\")\n else:\n print(\"property is not ready\")\n print(work.draft)\n print(work.path) \n print(work.workId)\n return False, None\n return True, work\n\n\ndef unzip_homework(path, draft):\n if not unzipfile(path, draft[\"fileDraft\"]):\n print(\"[*] all file aren't follow the draft\")\n return False\n print(\"[/] finish\")\n return True\n\n\ndef student_checking(path, work, file, openvs, onebyone):\n student = StudentData(path=work.path, filename=file, draft=work.draft)\n with open(os.path.join(path, \"ta\", \"work.json\"), \"r\") as workfile:\n scores = json.load(workfile)[\"scores\"]\n workfile.close\n student.prepare_student_data()\n did_student_checked(path,work, file, student, scores, openvs, onebyone)\n\n\ndef did_student_checked(path,work, file, student, scores, openvs, onebyone):\n if student.check_work_score(scores):\n if openvs and onebyone:\n assignmentpath = os.path.join(path,\"ta\", \"Assignment\", file)\n print(assignmentpath)\n os.system(f\"code \\\"{assignmentpath}\\\"\")\n work.write_work(student.ask())\n\n\ndef scoring(path, work, openvs, onebyone):\n list_file = os.listdir(os.path.join(path, \"ta\", \"Assignment\"))\n assignmentpath = os.path.join(\"ta\", \"Assignment\")\n if openvs and not onebyone:\n os.system(f\"code \\\"{assignmentpath}\\\"\")\n for file in list_file:\n if \".\" in file or file == \"ta\":\n continue\n student_checking(path, work, file, openvs, onebyone)\n\n\ndef run_work(path, openvs=True, onebyone=False):\n print(\"[*] starting...\")\n if not preparework(path):\n return False\n draft = draft_config(path)\n workstate, work = add_data_to_work(path, draft)\n if not workstate:\n return False\n if not unzip_homework(path, draft):\n return False\n scoring(path, work, openvs, onebyone)\n return True\n","sub_path":"src/main/run_work.py","file_name":"run_work.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62759037","text":"#!/bin/env python3\n\nimport math\n\n\n\ndef is_prime(n):\n \"\"\"\n Primes checker\n \"\"\"\n i = 2\n #if n < 0:\n # raise ValueError('Number must be bigger than 0')\n \n if n == 2: \n return True\n while i <= math.sqrt(n):\n if n % i == 0:\n return False\n i += 1\n return True\n\n\n\ndef factorial(n):\n \"\"\"\n Homemade factorial calculator\n \"\"\"\n if n <= 0:\n return 0\n fact = 1\n for i in range(1, n + 1, 1):\n fact *= i\n return fact\n\n\ndef primes_sieve2(limit):\n \"\"\" https://stackoverflow.com/questions/3939660/sieve-of-eratosthenes-finding-primes-python\n Reference: https://stackoverflow.com/a/3941967/1770460\n \"\"\"\n a = [True] * limit \n a[0] = a[1] = False\n\n for (i, isprime) in enumerate(a):\n if isprime:\n yield i\n for n in range(i*i, limit, i):\n a[n] = False\n\n\ndef primes_range(start, end):\n \"\"\" Get a range of prime number with a generator\n Based on primes_sieve(limit) \n Try it: print(list(primes_range(50, 100)))\n \"\"\"\n if start is None or end is None:\n raise ValueError(\"Start and end values can't be empty.\")\n if start >= end:\n raise ValueError(\"Start must be smaller than the end value.\")\n \n for i in primes_sieve2(end):\n if i >= start:\n yield 
i\n\n\n\n\n","sub_path":"project_euler/pe_utils/pe_math.py","file_name":"pe_math.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"254999119","text":"__author__ = 'HXiao'\nfrom werkzeug.contrib.cache import SimpleCache\n\nimport bmemcached\nimport config\nconfig=config.config\n# fundValueCache = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n# mnyValueCache = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n# bankValueCache = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n# fundRiskCache = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n# bankRiskCache = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n#\n# indexRiskCache = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n# cacheObject = bmemcached.Client((config.cacheConfig['host'],),config.cacheConfig['user'])\n\n\ncacheObject = SimpleCache()\n\n# fundValueCache = SimpleCache()\n# mnyValueCache = SimpleCache()\n# bankValueCache = SimpleCache()\n#\n# fundRiskCache = SimpleCache()\n# bankRiskCache = SimpleCache()\n#\n# indexRiskCache = SimpleCache()\n\n\n\ndef get_my_item(keyvalue, cache=cacheObject):\n es = cache.get(keyvalue)\n if es is None:\n return None\n return es\n\n\n\ndef set_cache(keyvalue,data,cache=cacheObject,cachetime=None):\n if cachetime is None:\n cachetime = 300\n else:\n pass\n cache.set(keyvalue, data, timeout=cachetime)\n return None\n\n\ndef use_cache(func):\n def _useCache(*args, **kwargs):\n # starttime=datetime.datetime.now()\n try:\n cachekey=kwargs['cachekey']\n except:\n cachekey = None\n try:\n cache=kwargs['cache']\n except:\n cache = cacheObject\n\n try:\n cachetime=kwargs['cachetime']\n except:\n cachetime = None\n\n # if (cachekey is not None) and (cache is not None):\n if (cachekey is not None):\n \"\"\"\n get the request uri and set it as a key in memcache\n \"\"\"\n cacheValue= get_my_item(cachekey, cache)\n if cacheValue is None:\n # print(\"not use cache\")\n result = func(*args, **kwargs)\n set_cache(cachekey,result,cache,cachetime)\n else:\n # print(\"use cache key=\"+str(cachekey))\n result= cacheValue\n else:\n result = func(*args, **kwargs)\n\n # endtime=datetime.datetime.now()\n # print(str((endtime - starttime).microseconds/1000) + 'ms ====es time')\n return result\n return _useCache\n\n\n","sub_path":"Server/Cache/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"575131177","text":"'''\nPerform face detection in real-time using webcam\n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport datetime\nimport numpy as np\nimport os\nimport cv2\nimport har_face_v2\nimport sys\nimport argparse\nimport time\nimport copy\nimport csv\n\n\ndef add_overlays(frame, faces, frame_rate):\n\n if faces is not None:\n for id_face in faces:\n\n name_gender = '%s %s' % (id_face.name, id_face.gender)\n age = 'Age: %s Yr.' 
% (id_face.age)\n face_bb = id_face.bounding_box.astype(int)\n\n if id_face.name == 'Unknown':\n box_color = (0, 0, 255)\n text_color = (0, 0, 255)\n else:\n box_color = (0, 255, 0)\n text_color = (255, 10, 0)\n\n cv2.rectangle(frame,\n (face_bb[0], face_bb[1]),\n (face_bb[2], face_bb[3]),\n box_color,\n 2)\n\n labelSize_n, baseLine_n = cv2.getTextSize(name_gender,\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n 2)\n label_ymin_n = max(face_bb[1], labelSize_n[1] + 10)\n\n cv2.rectangle(frame,\n (face_bb[0], label_ymin_n-labelSize_n[1]-10),\n (face_bb[0]+labelSize_n[0], label_ymin_n+baseLine_n-10),\n (255, 255, 255),\n cv2.FILLED)\n\n labelSize_a, baseLine_a = cv2.getTextSize(age,\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n 2)\n label_ymin_a = max(face_bb[3], labelSize_a[1] - 10)\n\n cv2.rectangle(frame,\n (face_bb[0], label_ymin_a-labelSize_a[1]-10),\n (face_bb[0]+labelSize_a[0], label_ymin_a+baseLine_a-10),\n (255, 255, 255),\n cv2.FILLED)\n\n if id_face.name is not None:\n cv2.putText(frame,\n name_gender,\n (face_bb[0], label_ymin_n-7),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.70,\n text_color,\n thickness=2,\n lineType=2)\n cv2.putText(frame,\n age,\n (face_bb[0], label_ymin_a-7),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.70,\n text_color,\n thickness=2,\n lineType=2)\n\n cv2.putText(frame,\n str(frame_rate) + ' fps',\n (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (0, 255, 0),\n thickness=2,\n lineType=2)\n return frame\n\n\ndef main(args):\n # number of frame after which to run face detection\n frame_interval = 3\n\n fps_display_interval = 5 # second\n frame_rate = 0\n frame_count = 0\n faces = []\n face_data = []\n prev_faces = []\n last_faces = []\n\n video_capture = cv2.VideoCapture(0)\n video_capture.set(cv2.CAP_PROP_FRAME_WIDTH, 960)\n video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)\n\n face_recognition = har_face_v2.Recognition()\n\n if args.debug:\n # print(\"Debug Enabled\")\n har_face_v2.debug = True\n\n if not os.path.exists('../output/'):\n os.makedirs('../output/')\n\n with open(\"../output/face_data.csv\", 'w', newline='') as f:\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow(['Time', 'Identified_Person', 'Age', 'Gender'])\n\n start_time = time.time()\n start_time_data = time.time()\n\n while True:\n # capture frame\n if video_capture.isOpened():\n ret, frame = video_capture.read()\n\n if (frame_count % frame_interval) == 0:\n if len(faces) > 0:\n last_faces = copy.deepcopy(faces)\n faces = face_recognition.identify(frame, last_faces)\n\n # check current fps\n end_time = time.time()\n if (end_time - start_time) > fps_display_interval:\n frame_rate = int(frame_count / (end_time - start_time))\n start_time = time.time()\n frame_count = 0\n if len(faces) > 0:\n frame = add_overlays(frame, faces, frame_rate)\n frame_count += 1\n cv2.imshow('Realtime Recognition', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if len(prev_faces) == 0:\n prev_faces = copy.deepcopy(faces)\n for face in faces:\n face_data.append(face)\n continue\n\n face_data, prev_faces = get_detected_faces(faces,\n prev_faces,\n face_data)\n\n if (time.time() - start_time_data) > 900:\n with open(\"../output/face_data.csv\", 'a', newline='') as f:\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n for face in face_data:\n ti = '{:%Y-%m-%d %H:%M:%S}'.format(face.timestamp)\n face_info = [ti, face.name, face.age, face.gender]\n wr.writerow(face_info)\n face_data = []\n start_time_data = time.time()\n\n # when everything is done\n video_capture.release()\n cv2.destroyAllWindows()\n\n with open(\"../output/face_data.csv\", 'a', 
newline='') as f:\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n for face in face_data:\n ti = '{:%Y-%m-%d %H:%M:%S}'.format(face.timestamp)\n face_info = [ti, face.name, face.age, face.gender]\n wr.writerow(face_info)\n\n\ndef get_detected_faces(faces, prev_faces, face_data):\n if len(prev_faces) > 0 and len(faces) > 0:\n for face in faces:\n for prev_face in prev_faces:\n time_delta = face.timestamp - prev_face.timestamp\n if face.name == prev_face.name and face.name == 'Unknown':\n l2_dist = np.linalg.norm(face.embedding - prev_face.embedding)\n # face_centroid = bb_centroid(face)\n # prev_face_centroid = bb_centroid(prev_face)\n centroid_dist = dist_centroid(bb_centroid(face),\n bb_centroid(prev_face))\n threshold = get_threshold(prev_face)\n\n if l2_dist < 1.10 and centroid_dist < threshold:\n if (time_delta.total_seconds() > 60):\n face_data.append(face)\n prev_face.timestamp = datetime.datetime.now()\n\n elif face.name == prev_face.name and face.name != 'Unknown':\n if (time_delta.total_seconds() > 60):\n face_data.append(face)\n prev_face.timestamp = datetime.datetime.now()\n\n if face.name not in [f.name for f in prev_faces]:\n face_data.append(face)\n prev_faces.append(face)\n\n return face_data, prev_faces\n\n\ndef bb_centroid(face):\n x = face.bounding_box[0] + face.bounding_box[2]//2\n y = face.bounding_box[1] + face.bounding_box[3]//3\n return (x, y)\n\n\ndef dist_centroid(c1, c2):\n x1, y1 = c1[0], c1[1]\n x2, y2 = c2[0], c2[1]\n return np.sqrt((x1-x2)**2 + (y1-y2)**2)\n\n\ndef get_threshold(t_face):\n w = t_face.bounding_box[2]\n h = t_face.bounding_box[3]\n return 0.75 * np.sqrt((w/2)**2 + (h/2)**2)\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--debug',\n action='store_true',\n help='Enable some debug outputs.')\n return parser.parse_args(argv)\n\n\nif __name__ == '__main__':\n main(parse_arguments(sys.argv[1:]))\n","sub_path":"src/har_real_time_recognition_v2.py","file_name":"har_real_time_recognition_v2.py","file_ext":"py","file_size_in_byte":8203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476086483","text":"#!/usr/bin/env python3\n\nimport unittest\nfrom lrank.ranker import Ranker\nfrom lrank.error import FileNotFoundError\n\n\nclass RankerTestCase(unittest.TestCase):\n\n def test_file_not_exits(self):\n \"\"\"\n If the input file is not found a FileNotFoundError should be raised.\n \"\"\"\n test_ranker = Ranker()\n with self.assertRaises(FileNotFoundError):\n test_ranker.rank('missing_file.txt')\n\n def test_base_case(self):\n \"\"\"\n Make sure the basic provided example passes the test.\n \"\"\"\n example_input = 'test/data/input_single_duplicate.txt'\n expected_output = 'test/data/output_single_duplicate.txt'\n\n expected_results = get_expected_results(expected_output)\n\n test_ranker = Ranker()\n self.assertEqual(test_ranker.rank(example_input), expected_results, 'Did not rank the data correctly.')\n\n def test_multiple_duplicates(self):\n \"\"\"\n Make sure that multiple duplicates are handled.\n \"\"\"\n example_input = 'test/data/input_multiple_duplicates.txt'\n expected_output = 'test/data/output_multiple_duplicates.txt'\n\n expected_results = get_expected_results(expected_output)\n\n test_ranker = Ranker()\n self.assertEqual(test_ranker.rank(example_input), expected_results, 'Did not handle multiple duplicates.')\n\n def test_double_digit_scores(self):\n \"\"\"\n Make sure that scores with double digits are handled properly.\n \"\"\"\n example_input = 
'test/data/input_high_scores.txt'\n expected_output = 'test/data/output_high_scores.txt'\n\n expected_results = get_expected_results(expected_output)\n\n test_ranker = Ranker()\n self.assertEqual(test_ranker.rank(example_input), expected_results, 'Large scores broke the build.')\n\n def test_long_team_names(self):\n \"\"\"\n Make sure that teams can have really long names.\n \"\"\"\n example_input = 'test/data/input_long_team_names.txt'\n expected_output = 'test/data/output_long_team_names.txt'\n\n expected_results = get_expected_results(expected_output)\n\n test_ranker = Ranker()\n self.assertEqual(test_ranker.rank(example_input), expected_results, 'Long team names broke the build.')\n\n def test_quoted_names(self):\n \"\"\"\n Make sure that teams can have quotes in their names.\n \"\"\"\n example_input = 'test/data/input_quotes.txt'\n expected_output = 'test/data/output_quotes.txt'\n\n expected_results = get_expected_results(expected_output)\n\n test_ranker = Ranker()\n self.assertEqual(test_ranker.rank(example_input), expected_results, 'Quotes in the team-names broke the build.')\n\n\ndef get_expected_results(filename):\n \"\"\"\n A utility method for reading the lines of a file and returning them as a list.\n\n :param filename: str\n :return: list of str\n \"\"\"\n with open(filename, 'r') as file_handle:\n expected_results = []\n\n for row in file_handle:\n expected_results.append(row.rstrip())\n\n return expected_results\n","sub_path":"test/ranker_test.py","file_name":"ranker_test.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228258619","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/nico/dev/Surprise/examples/precision_recall_at_k.py\n# Compiled at: 2019-01-04 08:05:49\n# Size of source mod 2**32: 2123 bytes\n\"\"\"\nThis module illustrates how to compute Precision at k and Recall at k metrics.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom collections import defaultdict\nfrom surprise import Dataset\nfrom surprise import SVD\nfrom surprise.model_selection import KFold\n\ndef precision_recall_at_k(predictions, k=10, threshold=3.5):\n \"\"\"Return precision and recall at k metrics for each user.\"\"\"\n user_est_true = defaultdict(list)\n for uid, _, true_r, est, _ in predictions:\n user_est_true[uid].append((est, true_r))\n\n precisions = dict()\n recalls = dict()\n for uid, user_ratings in user_est_true.items():\n user_ratings.sort(key=(lambda x: x[0]), reverse=True)\n n_rel = sum(true_r >= threshold for _, true_r in user_ratings)\n n_rec_k = sum(est >= threshold for est, _ in user_ratings[:k])\n n_rel_and_rec_k = sum(true_r >= threshold and est >= threshold for est, true_r in user_ratings[:k])\n precisions[uid] = n_rel_and_rec_k / n_rec_k if n_rec_k != 0 else 1\n recalls[uid] = n_rel_and_rec_k / n_rel if n_rel != 0 else 1\n\n return (precisions, recalls)\n\n\ndata = Dataset.load_builtin('ml-100k')\nkf = KFold(n_splits=5)\nalgo = SVD()\nfor trainset, testset in kf.split(data):\n algo.fit(trainset)\n predictions = algo.test(testset)\n precisions, recalls = precision_recall_at_k(predictions, k=5, threshold=4)\n print(sum(prec for prec in precisions.values()) / len(precisions))\n print(sum(rec for rec in recalls.values()) / 
len(recalls))","sub_path":"pycfiles/scikit-surprise-1.1.0.tar/precision_recall_at_k.cpython-36.py","file_name":"precision_recall_at_k.cpython-36.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257853990","text":"import os, glob, stat, sys, imp\nimport sibispy\n\nclass SummaryScoresCollector():\n def __init__(self, module_init_script):\n\n self.fields_list = dict()\n self.functions = dict()\n self.output_form = dict()\n self.instrument_list = []\n\n module_dir = os.path.dirname(os.path.abspath(module_init_script))\n\n instruments = [ os.path.basename( d ) for d in glob.glob(os.path.join(module_dir,'*')) if stat.S_ISDIR( os.stat( d ).st_mode ) and os.path.exists( os.path.join( d, '__init__.py' ) ) ]\n\n sys.path.append( os.path.abspath(os.path.dirname(module_init_script) ) )\n\n for i in instruments:\n module_found = imp.find_module( i, [module_dir] )\n module = imp.load_module( i, module_found[0], module_found[1], module_found[2] )\n\n self.instrument_list.append( i )\n self.fields_list[i] = module.input_fields\n self.functions[i] = module.compute_scores\n self.output_form[i] = module.output_form\n\n # dataframe and errorFlag\n def compute_scores(self, instrument, input_data, demographics, log, **kwargs):\n if log != sibispy.sibislogger:\n raise TypeError(\"Call must include a sibislogger!\")\n scoresDF = self.functions[instrument](input_data, demographics, log=log, **kwargs)\n \n # remove nan entries as they corrupt data ingest (REDCAP cannot handle it correctly) and superfluous zeros\n # this gave an error as it only works for float values to replace\n if len(scoresDF):\n # Only execute it not empty \n return (scoresDF.astype(object).fillna(''), False)\n \n return (scoresDF, False)\n","sub_path":"summary_scores_util.py","file_name":"summary_scores_util.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583320358","text":"from core.agility.v2_0.agilitymodel.base.Link import LinkBase\n\nclass ItemLinkBase(LinkBase):\n '''\n classdocs\n '''\n def __init__(self, parent=None):\n LinkBase.__init__(self)\n self._attrSpecs = getattr(self, '_attrSpecs', {})\n self._attrSpecs.update({'parent': {'type': 'Link', 'name': 'parent', 'minOccurs': '0', 'native': False}})\n self.parent = parent \n","sub_path":"core/agility/v2_0/agilitymodel/base/ItemLink.py","file_name":"ItemLink.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"407285505","text":"from riotwatcher import LolWatcher, ApiError\n\nlol_watcher = LolWatcher('RGAPI-b34c9615-2352-439e-8324-4326e28fbee1')\n\nmy_region = 'kr'\ntarget = input('검색할 닉네임을 입력하세요:\\n')\n\nme = lol_watcher.summoner.by_name(my_region, target)\nprint(me)\n\n# all objects are returned (by default) as a dict\n# lets see if i got diamond yet (i probably didnt)\nmy_ranked_stats = lol_watcher.league.by_summoner(my_region, me['id'])\nprint(my_ranked_stats)\n\n# First we get the latest version of the game from data dragon\nversions = lol_watcher.data_dragon.versions_for_region(my_region)\nchampions_version = versions['n']['champion']\n\n# Lets get some champions\ncurrent_champ_list = lol_watcher.data_dragon.champions(champions_version)\nprint(current_champ_list)\n\n# For Riot's API, the 404 status code indicates that the requested data wasn't found and\n# should be expected to occur in 
normal operation, as in the case of an\n# invalid summoner name, match ID, etc.\n#\n# The 429 status code indicates that the user has sent too many requests\n# in a given amount of time (\"rate limiting\").\n\ntry:\n    response = lol_watcher.summoner.by_name(my_region, 'this_is_probably_not_anyones_summoner_name')\nexcept ApiError as err:\n    if err.response.status_code == 429:\n        print('We should retry in {} seconds.'.format(err.headers['Retry-After']))\n        print('this retry-after is handled by default by the RiotWatcher library')\n        print('future requests wait until the retry-after time passes')\n    elif err.response.status_code == 404:\n        print('Summoner with that ridiculous name not found.')\n    else:\n        raise\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"112629699","text":"# Hint: You may not need all of these. Remove the unused functions.\n
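# Worked example (editor's note, illustrative only): with tickets NONE->PIT, PIT->DCA and\n# DCA->NONE, tickets_hash becomes {NONE: PIT, PIT: DCA, DCA: NONE} and reconstruct_trip\n# returns [\"PIT\", \"DCA\", \"NONE\"].\n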
Remove the unused functions.\nclass Ticket:\n def __init__(self, source, destination):\n self.source = source\n self.destination = destination\n\n\n\ndef reconstruct_trip(tickets, length):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n tickets_hash = {}\n\n route = []\n\n # hash each ticket\n for i in tickets:\n tickets_hash[i.source] = i.destination\n \n\n # find the first ticket, source == \"NONE\"\n prev_ticket = tickets_hash[\"NONE\"]\n route.append(prev_ticket)\n \n next_ticket = tickets_hash[prev_ticket]\n i=0\n while i < len(tickets)-1:\n route.append(next_ticket)\n prev_ticket = next_ticket\n next_ticket = tickets_hash[next_ticket]\n i += 1\n\n\n return route # route will be an array of strings with entire trip in order\n","sub_path":"hashtables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"522754626","text":"'''\nDate: 2021-06-08 10:04:21\nLastEditors: Liuliang\nLastEditTime: 2021-06-09 18:29:51\nDescription: main\n'''\n\n# 首先导入包\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\nimport os\n\nimport matplotlib.pyplot as plt\nimport torchvision.models as models\nfrom torchvision.transforms.transforms import RandomCrop\n# This is for the progress bar.\nfrom tqdm import tqdm\nfrom torch.utils.tensorboard import SummaryWriter\nimport ipdb\nimport albumentations\nimport cv2\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\ndef get_device():\n return 'cuda' if torch.cuda.is_available() else 'cpu'\n\ndevice = get_device()\ntorch.backends.cudnn.benchmark = True\nprint(device)\n\ntb_writer = SummaryWriter(log_dir=\"runs/flower_experiment\")\n\n# 看看label文件长啥样\nlabels_dataframe = pd.read_csv('/home/liuliang/leaves/train.csv')\n\n\n# 把label文件排个序\nleaves_labels = sorted(list(set(labels_dataframe['label'])))\nn_classes = len(leaves_labels)\nclass_to_num = dict(zip(leaves_labels, range(n_classes)))\nnum_to_class = {v : k for k, v in class_to_num.items()}\n\nclass LeavesData(Dataset):\n def __init__(self, csv_path, file_path, mode='train', valid_ratio=0.2, resize_height=256, resize_width=256):\n \"\"\"\n Args:\n csv_path (string): csv 文件路径\n img_path (string): 图像文件所在路径\n mode (string): 训练模式还是测试模式\n valid_ratio (float): 验证集比例\n \"\"\"\n \n # 需要调整后的照片尺寸,我这里每张图片的大小尺寸不一致#\n self.resize_height = resize_height\n self.resize_width = resize_width\n\n self.file_path = file_path\n self.mode = mode\n\n # 读取 csv 文件\n # 利用pandas读取csv文件\n self.data_info = pd.read_csv(csv_path, header=None) #header=None是去掉表头部分\n # 计算 length\n self.data_len = len(self.data_info.index) - 1\n self.train_len = int(self.data_len * (1 - valid_ratio))\n \n if mode == 'train':\n # 第一列包含图像文件的名称\n self.train_image = np.asarray(self.data_info.iloc[1:, 0])\n # self.train_image = np.asarray(self.data_info.iloc[1:self.train_len, 0]) #self.data_info.iloc[1:,0]表示读取第一列,从第二行开始到train_len\n # 第二列是图像的 label\n self.train_label = np.asarray(self.data_info.iloc[1:, 1])\n # self.train_label = np.asarray(self.data_info.iloc[1:self.train_len, 1])\n\n self.image_arr = self.train_image \n self.label_arr = self.train_label\n elif mode == 'valid':\n self.valid_image = np.asarray(self.data_info.iloc[self.train_len:, 0]) \n self.valid_label = np.asarray(self.data_info.iloc[self.train_len:, 1])\n self.image_arr = self.valid_image\n self.label_arr = self.valid_label\n elif mode == 'test':\n 
self.test_image = np.asarray(self.data_info.iloc[1:, 0])\n self.image_arr = self.test_image\n \n self.real_len = len(self.image_arr)\n\n print('Finished reading the {} set of Leaves Dataset ({} samples found)'\n .format(mode, self.real_len))\n\n def __getitem__(self, index):\n # 从 image_arr中得到索引对应的文件名\n single_image_name = self.image_arr[index]\n\n # 读取图像文件\n img_as_img = Image.open(self.file_path + single_image_name)\n\n #如果需要将RGB三通道的图片转换成灰度图片可参考下面两行\n# if img_as_img.mode != 'L':\n# img_as_img = img_as_img.convert('L')\n\n #设置好需要转换的变量,还可以包括一系列的nomarlize等等操作\n if self.mode == 'train':\n transform = transforms.Compose([\n # transforms.CenterCrop(224),\n # transforms.TenCrop(224, vertical_flip=False),\n # transforms.Resize((224, 224)),\n # transforms.TenCrop\n transforms.RandomCrop(224),\n transforms.RandomVerticalFlip(p=0.5) ,\n transforms.RandomHorizontalFlip(p=0.5), \n # transforms.RandomResizedCrop(size=224, scale=(0.8, 1.0)),\n # transforms.RandomRotation(degrees=15),\n #transforms.RandomHorizontalFlip(),\n #transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n # transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n\n\n\n else:\n # valid和test不做数据增强\n transform = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.CenterCrop(size=224),\n transforms.ToTensor(),\n # transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])\n ])\n \n img_as_img = transform(img_as_img)\n \n if self.mode == 'test':\n return img_as_img\n else:\n # 得到图像的 string label\n label = self.label_arr[index]\n # number label\n number_label = class_to_num[label]\n\n return img_as_img, number_label #返回每一个index对应的图片数据和对应的label\n\n def __len__(self):\n return self.real_len\n\n \n\ntrain_path = '/home/liuliang/leaves/train.csv'\ntest_path = '/home/liuliang/leaves/test.csv'\n# csv文件中已经images的路径了,因此这里只到上一级目录\nimg_path = '/home/liuliang/leaves/'\n\ntrain_dataset = LeavesData(train_path, img_path, mode='train')\nval_dataset = LeavesData(train_path, img_path, mode='valid')\ntest_dataset = LeavesData(test_path, img_path, mode='test')\nbatch_size = 224\n# 定义data loader\ntrain_loader = torch.utils.data.DataLoader(\n dataset=train_dataset,\n batch_size=batch_size, \n shuffle=True,\n num_workers=8,\n pin_memory=True\n )\n\nval_loader = torch.utils.data.DataLoader(\n dataset=val_dataset,\n batch_size=batch_size, \n shuffle=False,\n num_workers=8,\n pin_memory=True\n )\ntest_loader = torch.utils.data.DataLoader(\n dataset=test_dataset,\n batch_size=batch_size, \n shuffle=False,\n num_workers=8,\n pin_memory=True\n )\n \n# 是否要冻住模型的前面一些层\ndef set_parameter_requires_grad(model, feature_extracting):\n if feature_extracting:\n model = model\n for param in model.parameters():\n param.requires_grad = False\n\n\n\n\n# resnet152模型\ndef res_model(num_classes, feature_extract = False, use_pretrained=True):\n\n model_ft = models.resnet50(pretrained=use_pretrained)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, num_classes))\n\n return model_ft\n\n# 超参数, 这里为了演示就训练5轮看看\nlearning_rate = 1e-4\nweight_decay = 1e-3\nnum_epoch = 300\nmodel_path = './pre_res_model.ckpt'\n\n# torch.distributed.init_process_group(backend=\"nccl\")\nmodel = res_model(176)\nmodel = nn.DataParallel(model)\nmodel = model.to(device)\n# model = nn.parallel.DistributedDataParallel(model) # device_ids will include all GPU devices by default\n\n\n# For the classification task, we use cross-entropy as the measurement of performance.\ncriterion 
= nn.CrossEntropyLoss()\n\n# Initialize optimizer, you may fine-tune some hyperparameters such as learning rate on your own.\noptimizer = torch.optim.AdamW(model.parameters(), lr = learning_rate, weight_decay=0.02)\n# optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate)\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 30, eta_min=0, last_epoch=-1)\n# The number of training epochs.\nn_epochs = num_epoch\n\n# ipdb.set_trace()\nbest_acc = 0.0\nfor epoch in range(n_epochs):\n # ---------- Training ----------\n # Make sure the model is in train mode before training.\n model.train() \n # These are used to record information in training.\n train_loss = []\n train_accs = []\n # Iterate the training set by batches.\n for batch in tqdm(train_loader):\n # A batch consists of image data and corresponding labels.\n imgs, labels = batch\n imgs = imgs.to(device)\n labels = labels.to(device)\n # Forward the data. (Make sure data and model are on the same device.)\n logits = model(imgs)\n # Calculate the cross-entropy loss.\n # We don't need to apply softmax before computing cross-entropy as it is done automatically.\n loss = criterion(logits, labels)\n \n \n # Gradients stored in the parameters in the previous step should be cleared out first.\n optimizer.zero_grad()\n # Compute the gradients for parameters.\n loss.backward()\n # Update the parameters with computed gradients.\n optimizer.step()\n \n \n # Compute the accuracy for current batch.\n acc = (logits.argmax(dim=-1) == labels).float().mean()\n\n # Record the loss and accuracy.\n train_loss.append(loss.item())\n train_accs.append(acc)\n \n scheduler.step() \n # The average loss and accuracy of the training set is the average of the recorded values.\n train_loss = sum(train_loss) / len(train_loss)\n train_acc = sum(train_accs) / len(train_accs)\n\n # Print the information.\n print(f\"[ Train | {epoch + 1:03d}/{n_epochs:03d} ] loss = {train_loss:.5f}, acc = {train_acc:.5f},best_acc = {best_acc:.5f}\")\n \n \n # ---------- Validation ----------\n # Make sure the model is in eval mode so that some modules like dropout are disabled and work normally.\n model.eval()\n # These are used to record information in validation.\n valid_loss = []\n valid_accs = []\n \n # Iterate the validation set by batches.\n for batch in tqdm(val_loader):\n imgs, labels = batch\n # We don't need gradient in validation.\n # Using torch.no_grad() accelerates the forward process.\n with torch.no_grad():\n logits = model(imgs.to(device))\n \n # We can still compute the loss (but not the gradient).\n loss = criterion(logits, labels.to(device))\n\n # Compute the accuracy for current batch.\n acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()\n\n # Record the loss and accuracy.\n valid_loss.append(loss.item())\n valid_accs.append(acc)\n \n # The average loss and accuracy for entire validation set is the average of the recorded values.\n valid_loss = sum(valid_loss) / len(valid_loss)\n valid_acc = sum(valid_accs) / len(valid_accs)\n\n # Print the information.\n print(f\"[ Valid | {epoch + 1:03d}/{n_epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}, lr = {optimizer.param_groups[0]['lr']:.5f}\")\n\n tb_writer.add_scalar(\"train_loss\", train_loss, epoch)\n tb_writer.add_scalar(\"train_acc\", train_acc, epoch)\n tb_writer.add_scalar(\"learning_rate\", optimizer.param_groups[0]['lr'], epoch)\n tb_writer.add_scalar(\"valid_loss\",valid_loss,epoch)\n tb_writer.add_scalar(\"valid_acc\",valid_acc,epoch)\n \n \n\n \n # if the model 
improves, save a checkpoint at this epoch\n if valid_acc > best_acc:\n best_acc = valid_acc\n torch.save(model.state_dict(), model_path)\n print('saving model with acc {:.3f}'.format(best_acc))\n\ntb_writer.close() \n","sub_path":"kaggle/leaves/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"300092091","text":"#!/usr/bin/env python3\n\nfrom bst import BST, BSTNode\n\nclass RBNode(BSTNode):\n def __init__(self, key):\n super().__init__(key)\n self.color = 'r'\n\n def __str__(self):\n return str(self.key) + ' ' + str(self.color)\n\n def _get_child(self):\n '''if the node has at most one child, call this function to get it,\n if the node has no child, return a special child whose key is None'''\n\n if self.left is None and self.right is not None:\n return self.right\n elif self.right is None and self.left is not None:\n return self.left\n elif self.left is None and self.right is None:\n node = RBNode(None)\n node.parent=self.parent\n node.color = 'b'\n return node\n else:\n raise Exception('the node {} has two children!'.format(self))\n\nclass RB(BST):\n def _insert_fix(self, node):\n '''fix the color when insert a node'''\n\n while node.parent and node.parent.color == 'r':\n grandpa = node.parent.parent\n\n if grandpa is None:\n node.parent.color = 'b'\n return\n\n if grandpa.left is node.parent:\n uncle = grandpa.right\n if uncle and uncle.color == 'r':\n node.parent.color = 'b'\n uncle.color = 'b'\n grandpa.color = 'r'\n node = grandpa\n else:\n if node is node.parent.right:\n self.left_rotate(node)\n node = node.left\n self.right_rotate(node.parent)\n node.parent.color = 'b'\n grandpa.color = 'r'\n return\n\n else:\n uncle = grandpa.left\n if uncle and uncle.color == 'r':\n node.parent.color = 'b'\n uncle.color = 'b'\n grandpa.color = 'r'\n node = grandpa\n else:\n if node is node.parent.left:\n self.right_rotate(node)\n node = node.right\n self.left_rotate(node.parent)\n node.parent.color = 'b'\n grandpa.color = 'r'\n return\n\n self.root.color = 'b'\n\n def insert(self, node):\n '''rewrite the insert function'''\n\n super().insert(node)\n self._insert_fix(node)\n\n def _delete_fix(self, node):\n '''fix the color when delete a black node'''\n\n while node.parent and node.color == 'b':\n if node.parent.right and node.parent.right is not node:\n brother = node.parent.right\n\n if brother.color == 'r':\n self.left_rotate(brother)\n brother.color = 'b'\n node.parent.color = 'r'\n brother = node.parent.right\n\n if (brother.left is None and brother.right is None) or (\n brother.left is None and brother.right.color == 'b') or (\n brother.right is None and brother.left.color == 'b') or (\n brother.left and brother.left.color == 'b' and brother.right\n and brother.right.color == 'b'):\n brother.color = 'r'\n node = node.parent\n\n elif brother.right is None or brother.right.color == 'b':\n self.right_rotate(brother.left)\n brother.color = 'r'\n brother.parent.color = 'b'\n\n else:\n self.left_rotate(brother)\n brother.color = node.parent.color\n brother.right.color = 'b'\n node.parent.color = 'b'\n return\n\n else:\n brother = node.parent.left\n\n if brother.color == 'r':\n self.right_rotate(brother)\n brother.color = 'b'\n node.parent.color = 'r'\n brother = node.parent.left\n\n if (brother.left is None and brother.right is None) or (\n brother.left is None and brother.right.color == 'b') or (\n brother.right is None and brother.left.color == 'b') or (\n brother.left 
and brother.left.color == 'b' and brother.right\n and brother.right.color == 'b'):\n brother.color = 'r'\n node = node.parent\n\n elif brother.left is None or brother.left.color == 'b':\n self.left_rotate(brother.right)\n brother.color = 'r'\n brother.parent.color = 'b'\n\n else:\n self.right_rotate(brother)\n brother.color = node.parent.color\n brother.left.color = 'b'\n node.parent.color = 'b'\n return\n\n node.color = 'b'\n\n def delete(self, x):\n '''rewrite the delete function'''\n\n if x is None:\n raise KeyError('The key is not in the tree')\n color = x.color\n\n # has at most one child\n if x.left is None or x.right is None:\n child = x._get_child()\n self._delete(x)\n\n # has two children\n else:\n y = x.right.min()\n color = y.color\n child = y._get_child()\n self._delete(y)\n x.update_content(y)\n\n if color == 'b':\n self._delete_fix(child)\n","sub_path":"rb.py","file_name":"rb.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"241343025","text":"# 6-7人\nroommate_0 = {'name': '小向', 'like': '追剧', 'address': '射洪'}\nroommate_1 = {'name': '康康', 'like': '看电影', 'address': '广安'}\nroommate_2 = {'name': '猴猴', 'like': '打游戏', 'address': '达州'}\npeople = [roommate_0, roommate_1, roommate_2]\nfor roommate in people:\n print(roommate['name'] + \":\")\n for name, name_info in roommate.items():\n print(\"\\t\" + name + \":\" + str(name_info) + \".\")\n\n# 6-8宠物\nbird = {'type': '鸟', 'host': '康康'}\ndog = {'type': '狗', 'host': '小向'}\ncat = {'type': '猫', 'host': '章章'}\nlion = {'type': '狮子', 'host': '猴猴'}\nmonkey = {'type': '猴子', 'host': '小胖'}\ntiger = {'type': '老虎', 'host': '文文'}\npets = [bird, dog, cat, lion, monkey, tiger]\nfor pet in pets:\n for key, value in pet.items():\n print(key, ':', value)\n\n# 6-9喜欢的地方\nfavorite_places = {\n '猴猴': ['London', 'Paris'],\n '小向': ['北京', '天津'],\n '章章': ['上海', '广州'],\n '康康': ['云南', '广西'],\n}\nfor key, value in favorite_places.items():\n print(key, 'favorite place' + ':')\n for places in value:\n print(places)\n\n# 6-10喜欢的数字\nfavorite_number = {\n '猴猴': '22222',\n '章章娃儿': '999',\n '小向': '666',\n '康康娃儿': '44444',\n '小胖': '55555'\n}\nfor persons, nums in favorite_number.items():\n print(\"\\n\" + persons + \"喜欢的数字是:\")\n for num in nums:\n print(\"\\t\" + str(num))\n\n# 6-11城市\ncities = {\n '重庆': {\n 'country': 'China',\n 'population': '3124.32万人',\n 'fact': '火锅城',\n },\n '北京': {\n 'country': 'China',\n 'population': '2154.00万人',\n 'fact': '中国的首都',\n },\n '上海': {\n 'country': 'China',\n 'population': '2424.00万人',\n 'fact': '魔都',\n }\n}\nfor city, city_info in cities.items():\n print('\\n' + city + \":\")\n for info, value in city_info.items():\n print(\"\\t\" + info.title() + \":\" + value.capitalize() + \".\")\n","sub_path":"ch7/6-7.py","file_name":"6-7.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84703389","text":"import requests\nimport json\n# import re\n# import ssl\nimport os\nimport xmlrpc.client\n\nSavePath = r\"C:\\xfgcs\"\nurl_basicList = \"https://www.zongtongedu.com/Video/basicList\"\nurl_userDetails = \"https://www.zongtongedu.com/uc/userDetails\"\nurl_Login = \"https://www.zongtongedu.com/Login/login\"\nurl_firstBasic = \"https://www.zongtongedu.com/video/firstBasic\"\nurl_GetCouse = \"https://www.zongtongedu.com/video/GetCouse\"\nurl_basicInfo = \"https://www.zongtongedu.com/video/basicInfo\"\nurl_VideoBasic = 
\"https://www.zongtongedu.com/video/VideoBasic\"\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Connection\": \"keep-alive\"\n}\ncookies = {\n \"ASP.NET_SessionId\": \"sksocyzm2imt0kbultioqvt3\",\n \"uToKen\": \"98743884163bd75fa27f43b12e6c3a15\"\n}\nRootSavePath = r\"F:\\消防工程师\"\n\ndef GetSectionVideoUrl(session, Chapter, Section, CourseInfo, basicInfoData):\n data = {\n \"examid\": basicInfoData[\"examid\"],\n \"courseid\": Section[\"courseid\"],\n \"vid\": Section[\"vid\"]\n }\n # JsonStr = session.post(url_VideoBasic, headers=headers, cookies=cookies, data=data)\n # resultVideo = JsonStr.json()\n Title = Chapter[\"title\"] + \" \" + Section[\"title\"]\n Title = Title.replace(\" \", \"_\")\n filename = str(CourseInfo['vtYear']) + \"_\" + CourseInfo['vtTitle'] + \"_\" + CourseInfo['classname'] + \"_\" + Title + \".json\"\n VideoListFile = os.path.join(SavePath, filename)\n if not os.path.isfile(VideoListFile):\n JsonStr = session.post(url_VideoBasic, headers=headers, cookies=cookies, data=data)\n if JsonStr.status_code == 500:\n print(JsonStr.reason)\n else:\n resultVideo = JsonStr.json()\n with open(VideoListFile, 'w') as f:\n json.dump(resultVideo, f, ensure_ascii=False, indent=0)\n else:\n with open(VideoListFile, \"r\") as f:\n resultVideo = json.load(f)\n if resultVideo['Status'] == 0:\n VideoInfo = {\n \"filename\": str(resultVideo['Data']['order']) + \"_\" + Title + \".mp4\",\n \"url\": resultVideo['Data']['vUrl']\n }\n CourseInfo[\"videolist\"].append(VideoInfo)\n print(VideoInfo)\n else:\n print(resultVideo['ErrMsg'])\n\n\ndef GetCourseVideoUrl(session, CourseInfo, basicInfoData):\n filename = str(CourseInfo['vtYear']) + \"_\" + CourseInfo['vtTitle'] + \"_\" + CourseInfo['classname']\n basicInfoFile = os.path.join(SavePath, filename + \".json\")\n if not os.path.isfile(basicInfoFile):\n JsonStr = session.post(url_basicInfo, headers=headers, data=basicInfoData)\n if JsonStr.status_code == 500:\n print(JsonStr.reason)\n else:\n basicInfo = JsonStr.json()\n with open(basicInfoFile, 'w') as f:\n json.dump(basicInfo, f, ensure_ascii=False, indent=0)\n else:\n with open(basicInfoFile, \"r\") as f:\n basicInfo = json.load(f)\n\n for Chapter in basicInfo['Data']['infoList']:\n if len(Chapter[\"infoList\"]) == 0:\n Section1 = Chapter\n GetSectionVideoUrl(session, Chapter, Section1, CourseInfo, basicInfoData)\n else:\n for Section in Chapter[\"infoList\"]:\n GetSectionVideoUrl(session, Chapter, Section, CourseInfo, basicInfoData)\n\n # GetVideo(CourseInfo)\n JsonFileName = os.path.join(SavePath, filename + \"_VideoList.json\")\n with open(JsonFileName, 'w') as f:\n json.dump(CourseInfo, f, ensure_ascii=False, indent=0)\n\n\ndef GetVideo(CourseInfo):\n SavePath = os.path.join(RootSavePath, str(CourseInfo['vtYear']), (CourseInfo['vtTitle'] + '_' + CourseInfo['classname']))\n if not os.path.isdir(SavePath):\n os.makedirs(SavePath)\n\n with xmlrpc.client.ServerProxy(\"http://localhost:6800/rpc\") as s:\n for filevideo in CourseInfo['videolist']:\n if not os.path.isfile(os.path.join(SavePath, filevideo['filename'])) and filevideo['url'] != '':\n r = s.aria2.addUri([filevideo['url']], {\"dir\": SavePath, \"out\": filevideo['filename']})\n pass\n pass\n\nif __name__ == \"__main__\":\n\n examid = 12\n\n with open(os.path.join(SavePath, 'LogInData.json'), \"r\") as f:\n LogInData = json.load(f)\n session = 
requests.session()\n # session.post(url_Login, data=LogInData)\n\n firstBasicFile = os.path.join(SavePath, \"firstBasic.json\")\n if not os.path.isfile(firstBasicFile):\n data = {\n \"courseid\": 2,\n \"examid\": examid,\n \"year\": 0\n }\n JsonStr = session.post(url_firstBasic, headers=headers, cookies=cookies, data=data)\n if JsonStr.status_code == 500:\n print(JsonStr.reason)\n else:\n firstBasic = JsonStr.json()\n with open(firstBasicFile, 'w') as f:\n json.dump(firstBasic, f, ensure_ascii=False, indent=0)\n else:\n with open(firstBasicFile, \"r\") as f:\n firstBasic = json.load(f)\n\n GetCousesFile = os.path.join(SavePath, \"GetCouses.json\")\n if not os.path.isfile(GetCousesFile):\n data = {\n \"examid\":examid,\n }\n JsonStr = session.post(url_GetCouse, headers=headers, cookies=cookies, data=data)\n if JsonStr.status_code == 500:\n print(JsonStr.reason)\n else:\n GetCouses = JsonStr.json()\n with open(GetCousesFile, 'w') as f:\n json.dump(GetCouses, f, ensure_ascii=False, indent=0)\n else:\n with open(GetCousesFile, \"r\") as f:\n GetCouses = json.load(f)\n\n for Basic in firstBasic[\"Data\"]:\n if Basic[\"vtYear\"] != 2019:\n for Course in GetCouses[\"Data\"]:\n CourseInfo = {\n \"vtfid\":Basic[\"vtfid\"],\n \"vtYear\": Basic[\"vtYear\"],\n \"vtTitle\": Basic[\"vtTitle\"],\n \"classname\": Course[\"title\"],\n \"videolist\": []\n }\n # if Course[\"title\"]==\"消防规范\":\n # print(\"\")\n data = {\n \"examid\": examid,\n \"courseid\": Course[\"courseId\"],\n \"vtfid\": Basic[\"vtfid\"]\n }\n GetCourseVideoUrl(session, CourseInfo, data)\n pass\n print(Basic)\n pass\n","sub_path":"爬虫/爬虫-消防工程师网课.py","file_name":"爬虫-消防工程师网课.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615116342","text":"import tensorflow as tf\nimport sys\n\nfrom data.inex.preproc import parse_and_preproc_data\nfrom genmodel.htmm.bottom_up import BottomUpHTMM\nfrom genmodel.htmm.top_down import TopDownHTMM\nfrom htmn.generative_inference import generative_inference\nfrom htmn.rdn import RDN\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n # Restrict TensorFlow to only use the first GPU\n try:\n tf.config.experimental.set_visible_devices([], 'GPU')\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPU\")\n except RuntimeError as e:\n # Visible devices must be set before GPUs have been initialized\n print(e)\n\ndataset, n_bu, n_td, C, batch_size = sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), \\\n int(sys.argv[4]), int(sys.argv[5])\nfeatures, labels = parse_and_preproc_data(dataset)\n\nsss = StratifiedShuffleSplit(n_splits=1, test_size=0.33, random_state=0)\ntrain_index, eval_index = list(sss.split(features['tree'], labels))[0]\n\ntrain_data, train_lab = {'tree': features['tree'][train_index],\n 'limits': features['limits'][train_index]}, labels[train_index]\n\neval_data, eval_lab = {'tree': features['tree'][eval_index],\n 'limits': features['limits'][eval_index]}, labels[eval_index]\n\n\nbu_model = BottomUpHTMM(n_bu, C, 32, 366)\ntd_model = TopDownHTMM(n_td, C, 32, 366)\nrdn = RDN('C', 11, n_bu, n_td)\n\nadam_opt = tf.keras.optimizers.Adam(learning_rate=1e-3)\n\ncce = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\nloss_mean = tf.keras.metrics.Mean()\n\naccuracy_mean = tf.keras.metrics.Mean()\naccuracy = 
tf.keras.metrics.Accuracy()\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_lab))\ntrain_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)\n\neval_dataset = tf.data.Dataset.from_tensor_slices((eval_data, eval_lab)).batch(batch_size)\n\n\n@tf.function\ndef train_step(batch_features, batch_labels, bu_model, td_model, rdn, adam_opt):\n with tf.GradientTape() as bu_tape:\n bu_likelihood = generative_inference(batch_features, bu_model)\n neg_bu_likelihood = -1 * tf.reduce_mean(bu_likelihood, axis=0)\n\n with tf.GradientTape() as td_tape:\n td_likelihood = generative_inference(batch_features, td_model)\n neg_td_likelihood = -1 * tf.reduce_mean(td_likelihood, axis=0)\n\n with tf.GradientTape() as rdn_tape:\n logits = rdn(bu_likelihood, td_likelihood)\n one_hot = tf.one_hot(batch_labels, 11)\n loss = cce(one_hot, logits)\n\n bu_grads = bu_tape.gradient(neg_bu_likelihood, bu_model.trainable_weights)\n td_grads = td_tape.gradient(neg_td_likelihood, td_model.trainable_weights)\n rdn_grads = rdn_tape.gradient(loss, rdn.trainable_weights)\n\n adam_opt.apply_gradients(zip(bu_grads, bu_model.trainable_weights))\n adam_opt.apply_gradients(zip(td_grads, td_model.trainable_weights))\n adam_opt.apply_gradients(zip(rdn_grads, rdn.trainable_weights))\n\n return loss\n\n\n@tf.function\ndef eval_step(batch_features, batch_labels, bu_model, td_model, rdn):\n bu_likelihood = generative_inference(batch_features, bu_model)\n td_likelihood = generative_inference(batch_features, td_model)\n\n logits = rdn(bu_likelihood, td_likelihood)\n loss = cce(batch_labels, logits)\n\n predictions = tf.argmax(logits, axis=0)\n acc = accuracy(batch_labels, predictions)\n return loss, acc\n\n\nfor epoch in range(100):\n tf.print('Start of epoch %d' % (epoch,))\n for step, (batch_features, batch_labels) in enumerate(train_dataset):\n tf.print(\"Step\", step)\n loss = train_step(batch_features, batch_labels, bu_model, td_model, rdn, adam_opt)\n\n weight = [batch_labels.shape[0]/batch_size]\n loss_mean.update_state(loss, [weight])\n\n if step % 10 == 0:\n print(\" Loss during step\", step, \"=\", loss_mean.result().numpy())\n loss_mean.reset_states()\n\n loss_mean.reset_states()\n print('Starting evaluation %d' % (epoch, ))\n for batch_features, batch_labels in eval_dataset:\n loss, acc = eval_step(batch_features, batch_labels, bu_model, td_model, rdn)\n\n weight = [batch_labels.shape[0]/batch_size]\n accuracy_mean.update_state(acc, [weight])\n loss_mean.update_state(loss, [weight])\n\n print('Evaluation result:')\n print(' Loss = ', loss_mean.result().numpy())\n print(' Accuracy = ', accuracy_mean.result().numpy())\n accuracy_mean.reset_states()\n loss_mean.reset_states()\n","sub_path":"htmn_inex_main.py","file_name":"htmn_inex_main.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74437095","text":"\n\"\"\"\nWritten by Albert \"Anferensis\" Ong\n\"\"\"\n\nimport os\nfrom bge import logic\nfrom bge import texture\n\n\ndef main():\n \n cont = logic.getCurrentController()\n owner = cont.owner\n \n secondary = owner.sensors[\"Secondary\"]\n \n if secondary.positive and owner[\"Character Selected\"]:\n owner[\"Character Selected\"] = False\n \n objects = logic.getCurrentScene().objects\n \n select_character_text = objects[\"Select Character Text\"]\n select_character_text.text = \"SELECT CHARACTER\"\n \n \n character_portrait_glow = objects[\"Character Portrait Glow\"]\n 
character_portrait_glow.visible = False\n \n \n character_portrait = objects[\"Character Portrait\"]\n matID = texture.materialID(character_portrait, \"MA\" + \"Character Portrait\") \n tex = texture.Texture(character_portrait, matID, 0)\n \n # Specify path to image.\n os.chdir(logic.expandPath(\"//\"))\n os.chdir(\"../Rendered Images/Character Portraits\")\n cwd = os.getcwd()\n \n image_path = cwd + \"/black.png\"\n tex.source = texture.ImageFFmpeg(image_path)\n\n owner[\"texture_property\"] = tex\n tex.refresh(True)\n\n\n\n#======================================================\n\nmain()\n\n\n","sub_path":"UI/Buttons/character_deselect.py","file_name":"character_deselect.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572896292","text":"import pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('data2000_2020.csv')\n\n##Drop all ticker symbols which are not in sp500_constituents_wikipedia.csv\nticker_list = pd.read_csv('../../sp500_constituents_wikipedia.csv', header=None)\n\nfor index, row in df[:5].iterrows():\n if row['tic'] not in ticker_list[0].tolist():\n df.drop(index=index, inplace=True)\n##=> All tickers are correct, no drops\n\n#analyze empty cells\nrelevant_columns = ['at', 'ebit', 'lt', 'sale', 'act', 'dltt', 'dv']\nfor column in relevant_columns:\n print(column, 'has', df[column].isnull().sum(), 'empty cells of', len(df), 'cells in total')\n\n##calculate financial ratios\ndf['ebit_margin'] = df['ebit'] / df['sale']\ndf['leverage'] = df['lt'] / df['at']\ndf['div_ebit'] = df['dv'] / df['ebit']\n\n#save data\ndf.to_csv('data2000_2020_edited.csv')","sub_path":"data/financial_data/data_overview.py","file_name":"data_overview.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433377162","text":"#! 
python3\r\n#\r\n# NAME : remove_sensitive_info.py\r\n#\r\n# DESCRIPTION : Removes sensitive information such as Social Security\r\n# or credit card numbers from the clipboard.\r\n#\r\n# AUTHOR : Tim Kornev (@Timmate on GitHub)\r\n#\r\n# CREATED DATE : 6th of August, 2016\r\n\r\n\r\nimport re\r\n\r\nimport pyperclip\r\n\r\n\r\n# Assume that credit card number is formatted like this: XXXX XXXX XXXX XXXX\r\n# and Social Security number is formatted like this: XXX-XX-XXXX\r\ncredit_card_number_regex = re.compile(r'\\d{4} \\d{4} \\d{4} \\d{4}')\r\nsocial_security_number_regex = re.compile(r'\\d{3}-\\d{2}-\\d{4}')\r\n\r\n# Copy the text from the clipboard.\r\ntext = pyperclip.paste()\r\n\r\n# Remove the sensitive information.\r\ntext = credit_card_number_regex.sub('', text)\r\ntext = social_security_number_regex.sub('', text)\r\n\r\n# Copy the result text to the clipboard.\r\npyperclip.copy(text)\r\n\r\nprint()\r\nprint('Done.')\r\nprint()\r\n","sub_path":"Chapter 07 – Pattern Matching with Regular Expressions/remove_sensitive_info.py","file_name":"remove_sensitive_info.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"16894923","text":"#from mock import Mock\nfrom pprint import pprint\n\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.utils import translation\n\nfrom id.middleware import Timezone, Language\n\nclass TimezoneTest(TestCase):\n def setUp(self):\n self.user = User.objects.create_user(username='test_user', email='devnull@yandex.ru', password='password')\n self.user.save()\n\n # request\n self.request_factory = RequestFactory()\n\n def test_middleware_default(self):\n self.assertEqual(self.user.profile.language, 'en')\n self.assertEqual(self.user.profile.timezone, 'UTC')\n\n timezone_middleware = Timezone()\n language_middleware = Language()\n\n request = self.request_factory.get('/id/login/')\n request.user = self.user\n\n timezone_middleware.process_request(request)\n language_middleware.process_request(request)\n\n self.assertEqual(translation.get_language(), 'en')\n self.assertEqual(str(timezone.get_current_timezone()), 'UTC')\n\n def test_middleware_check(self):\n self.user.profile.timezone = 'Europe/Moscow'\n self.user.profile.language = 'ru'\n self.user.save()\n\n self.assertEqual(self.user.profile.timezone, 'Europe/Moscow')\n self.assertEqual(self.user.profile.language, 'ru')\n\n timezone_middleware = Timezone()\n language_middleware = Language()\n\n request = self.request_factory.get('/id/login/')\n request.user = self.user\n\n timezone_middleware.process_request(request)\n language_middleware.process_request(request)\n\n self.assertEqual(str(timezone.get_current_timezone()), 'Europe/Moscow')\n self.assertEqual(translation.get_language(), 'ru')\n","sub_path":"tests/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"277328144","text":"import math\n\n\"\"\"\nProblem: 544. Output Contest Matches\nUrl: https://leetcode.com/articles/output-contest-matches/ \nAuthor: David Wang\nDate: 9/10/2018\n\"\"\"\n\nclass Solution(object):\n\n def findContestMatch(self, n):\n matches = [i+1 for i in range(n)]\n # Property of Logs: logb(a) = log(a)/log(b)\n print(self.matchIter(n, matches)[0])\n\n def findContestMatchRecur(self, n):\n matches = [i+1 for i in range(n)]\n # Property of Logs: logb(a) = log(a)/log(b)\n self.matchRecur(n, matches)\n print(matches[0])\n\n def matchIter(self, n, matches):\n for i in range(int(math.log(n, 2))):\n cur_match = []\n size = len(matches)\n for j in range(int(size/2)):\n cur_match.append('({},{})'.format(matches[j], matches[int(n/(i+1)) - 1 - j]))\n matches = cur_match\n return matches\n\n def matchRecur(self, n, matches):\n if n == 1:\n return\n for i in range(n):\n matches[i] = '({},{})'.format(matches[i], matches[n-1-i])\n\n self.matchRecur(int(n/2), matches)\n\nif __name__ == '__main__':\n s = Solution()\n #s.findContestMatchRecur(8)\n s.findContestMatch(8)\n","sub_path":"544_Output_Contest_Matches/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"271722021","text":"class Solution(object):\n def reverseWords(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n reverse = s[::-1].split()\n return ' '.join([word[::-1] for word in reverse])\n\nif __name__ == \"__main__\": \n print(Solution().reverseWords(\"the sky is blue\")) \n","sub_path":"151_Reverse Words in a String.py","file_name":"151_Reverse Words in a String.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"411824663","text":"import math\nfrom os import path\nimport argparse\nimport numpy as np\nimport json\n\nfrom alternative_approaches.timegan_ydata.src.ydata_synthetic.synthesizers.timeseries import TimeGAN\nfrom data_loading import real_data_loading, sine_data_generation\nfrom metrics.discriminative_metrics import discriminative_score_metrics\nfrom metrics.predictive_metrics import predictive_score_metrics\nfrom metrics.visualization_metrics import alternative_visualization, visualization\n\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\ndef main(args):\n\n seq_len = args.seq_len\n hidden_dim = args.hidden_dim\n scale_train = args.scale_train\n gamma = 1\n\n noise_dim = 32\n dim = 0\n batch_size = args.batch_size\n\n log_step = 100\n learning_rate = 5e-4\n\n data = list()\n if args.data_name in ['stock', 'energy']:\n if args.data_name in ['stock']:\n dim = 6\n else:\n dim = 28\n print('Using ' + args.data_name + 'data set.')\n data = real_data_loading(args.data_name, args.seq_len)\n elif args.data_name == 'sine':\n print('Using sine data set.')\n # Set number of samples and its dimensions\n no, dim = 10000, 5\n data = sine_data_generation(no, args.seq_len, dim)\n\n gan_args = [batch_size, learning_rate, noise_dim, 24, 2, (0, 1), dim]\n\n if args.trained_model != 'none' and path.exists(args.trained_model):\n print('Using trained model.')\n synth = TimeGAN.load(args.trained_model)\n output(data, seq_len, synth, args.model + '-' + args.data_name, args.metric_iteration)\n else:\n synth = TimeGAN(model_parameters=gan_args, hidden_dim=hidden_dim, seq_len=seq_len, n_seq=dim, gamma=gamma, scale_train=scale_train)\n\n iterations = 
args.iteration\n save_every_iterations = args.save_itt\n\n if save_every_iterations > 0 and False:\n print('Saving model every ' + str(save_every_iterations) + ' iterations.')\n rounds = math.ceil(iterations / save_every_iterations)\n iterations = math.ceil(iterations / rounds)\n else:\n rounds = 1\n\n for r in range(rounds):\n itt = (r + 1) * iterations\n # synth.train(stock_data, train_steps=5)\n synth.train(data, train_steps=iterations)\n synth.save('results/model/' + args.model + '-' + args.data_name + '-' + str(itt) + 'itt.pkl')\n\n output(data, seq_len, synth, args.model + '-' + args.data_name + '-' + str(itt) + 'itt', args.metric_iteration)\n\n\ndef output(ori_data, seq_len, model, name, metric_iteration):\n synth_data = model.sample(len(ori_data))[:len(ori_data)]\n print(len(synth_data))\n\n metric_results = dict()\n\n # 1. Discriminative Score\n discriminative_score = list()\n for _ in range(metric_iteration):\n temp_disc = discriminative_score_metrics(ori_data, synth_data)\n discriminative_score.append(temp_disc)\n\n metric_results['discriminative'] = np.mean(discriminative_score)\n\n # 2. Predictive score\n predictive_score = list()\n for tt in range(metric_iteration):\n temp_pred = predictive_score_metrics(ori_data, synth_data)\n predictive_score.append(temp_pred)\n\n metric_results['predictive'] = np.mean(predictive_score)\n print(metric_results)\n\n with open('results/metrics/' + name + '-metrics.json', 'w') as json_file:\n json.dump(metric_results, json_file, cls=NumpyEncoder)\n\n # 3. Visualization (PCA and t-SNE)\n visualization(ori_data, synth_data, 'pca', name)\n visualization(ori_data, synth_data, 'tsne', name)\n\n alternative_visualization(ori_data, synth_data, seq_len, name)\n\nif __name__ == '__main__':\n\n # Inputs for the main function\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model',\n choices=['timegan'],\n default='timegan',\n type=str)\n parser.add_argument(\n '--save_itt',\n default=0,\n type=int)\n parser.add_argument(\n '--trained_model',\n default='none',\n type=str)\n parser.add_argument(\n '--data_name',\n choices=['sine','stock','energy'],\n default='stock',\n type=str)\n parser.add_argument(\n '--seq_len',\n help='sequence length',\n default=24,\n type=int)\n parser.add_argument(\n '--scale_train',\n help='scaled training',\n default=1,\n type=int)\n parser.add_argument(\n '--module',\n choices=['gru','lstm','lstmLN'],\n default='gru',\n type=str)\n parser.add_argument(\n '--hidden_dim',\n help='hidden state dimensions (should be optimized)',\n default=24,\n type=int)\n parser.add_argument(\n '--num_layer',\n help='number of layers (should be optimized)',\n default=3,\n type=int)\n parser.add_argument(\n '--iteration',\n help='Training iterations (should be optimized)',\n default=50000,\n type=int)\n parser.add_argument(\n '--batch_size',\n help='the number of samples in mini-batch (should be optimized)',\n default=128,\n type=int)\n parser.add_argument(\n '--metric_iteration',\n help='iterations of the metric computation',\n default=10,\n type=int)\n\n args = parser.parse_args()\n\n # Calls main function\n main(args)","sub_path":"TimeGAN - Jan Mark/experiment_ydata.py","file_name":"experiment_ydata.py","file_ext":"py","file_size_in_byte":5413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"639132118","text":"#Embedded file name: localizationBSD/exporters\\localizationXMLResourceExporter.py\nimport xml.etree.ElementTree\nimport os\nimport zipfile\nfrom . 
import LocalizationExporterError\nimport localizationExporter\n\nclass LocalizationXMLResourceExporter(localizationExporter.LocalizationExporterBase):\n \"\"\"\n Exporter that creates *.xml resource file with text data from zlocalization tables. The data doesnt include metadata and\n metadata related content. This is used by non-game clients.\n \"\"\"\n EXPORT_DESCRIPTION = 'Exports language data into .xml file, that is then used by Launcher (or any other non-game client) to load language strings.\\nWhere:\\nexportLocation - location of folder under which the resource xml file is created\\nexportFileName - name of the language resource file to write.'\n FILE_EXT = '.xml'\n XML_TEXT_ROOT = 'TextResource'\n XML_LANGUAGES = 'languages'\n XML_LANGUAGE = 'language'\n XML_LANGUAGE_ID = 'languageID'\n XML_LANGUAGE_NAME = 'name'\n XML_TRANSLATED_NAME = 'translatedName'\n XML_TEXTS = 'texts'\n XML_MESSAGE = 'message'\n XML_LABEL = 'label'\n XML_TEXT = 'text'\n XML_STRING = 'string'\n\n @classmethod\n def ExportWithProjectSettingsToZipFileObject(cls, projectID, fileObject, exportFileName, getSubmittedOnly = True, bsdBranchID = None):\n if not exportFileName:\n exportFileName = 'localization'\n rootElement = cls._CreateXMLElements(projectID, getSubmittedOnly)\n textsElementTree = xml.etree.ElementTree.ElementTree(rootElement)\n zipDataFile = zipfile.ZipFile(fileObject, 'w')\n\n class dummy:\n pass\n\n data = []\n f = dummy()\n f.write = data.append\n textsElementTree.write(f, 'utf-8', xml_declaration=True)\n data = ''.join(data)\n zipDataFile.writestr(exportFileName + '.xml', data)\n zipDataFile.close()\n return (zipDataFile, [exportFileName + '.xml'])\n\n @classmethod\n def ExportWithProjectSettings(cls, projectID, exportLocation, exportFileName, getSubmittedOnly = True, **kwargs):\n \"\"\"\n Execute this export method for specified project, with project settings provided.\n Method queries the DB and writes language data into .xml file in the specified folder\n NOTE: inherited from LocalizationExporterBase\n parameters:\n projectID - ID of specific project to select data for. This identifies what content will be exported.\n exportLocation - location of folder under which the resource xml file is created\n exp: \"root:/tools/launcher/\"\n exportFileName - name of the language resource file to write.\n exp: \"localization\"\n getSubmittedOnly - flag to indicate if need to write submitted only BSD entries.\n If True is passed, only submitted BSD entries are written into\n pickles. Otherwise, latest submitted and unsubmitted BSD \n entries are written into pickles.\n returns:\n list of new file paths\n \"\"\"\n if not exportLocation or not exportFileName:\n raise LocalizationExporterError('Filepath strings are incomplete. exportLocation, exportFileName: %s, %s.' % (exportLocation, exportFileName))\n exportedFilenames = []\n rootElement = cls._CreateXMLElements(projectID, getSubmittedOnly)\n textsElementTree = xml.etree.ElementTree.ElementTree(rootElement)\n try:\n exportedFilenames = [os.path.join(exportLocation, exportFileName + cls.FILE_EXT)]\n textsElementTree.write(exportedFilenames[0], encoding='utf-8', xml_declaration=True)\n except TypeError as anError:\n newMessage = \"Is there perhaps an attribute on XML Element with None value? 
ElementTree doesn't like that.\"\n raise TypeError(anError.args, newMessage)\n\n return exportedFilenames\n\n @classmethod\n def _CreateXMLElements(cls, projectID, getSubmittedOnly):\n \"\"\"\n \"\"\"\n exportData = cls._GetLocalizationMessageDataForExport(projectID, getSubmittedOnly)\n folderPathToLabelsIndex = exportData[0]\n messagesDict = exportData[1]\n languageCodesResultSet = exportData[2]\n rootElement = xml.etree.ElementTree.Element(tag=cls.XML_TEXT_ROOT)\n languagesElement = xml.etree.ElementTree.Element(cls.XML_LANGUAGES)\n rootElement.append(languagesElement)\n langDict = sm.GetService('cache').Rowset(const.cacheLocalizationLanguages).Index('languageID')\n for aLanguage in languageCodesResultSet:\n try:\n langName = langDict[aLanguage.languageID].languageName\n except KeyError:\n langName = 'Unknown language %s' % aLanguage.languageID\n\n attributes = {cls.XML_LANGUAGE_ID: aLanguage.languageID,\n cls.XML_LANGUAGE_NAME: langName}\n languagesElement.append(xml.etree.ElementTree.Element(cls.XML_LANGUAGE, attrib=attributes))\n\n textElement = xml.etree.ElementTree.Element(cls.XML_TEXTS)\n rootElement.append(textElement)\n for messageID, messageObj in sorted(messagesDict.iteritems(), key=lambda x: x[1].labelPath):\n messageElement = xml.etree.ElementTree.Element(cls.XML_MESSAGE, attrib={cls.XML_LABEL: messageObj.labelPath})\n textElement.append(messageElement)\n for languageID, textRow in messageObj.GetAllTextDict().iteritems():\n stringElement = xml.etree.ElementTree.Element(cls.XML_TEXT, attrib={cls.XML_LANGUAGE_ID: languageID,\n cls.XML_STRING: textRow.text})\n messageElement.append(stringElement)\n\n return rootElement\n\n @classmethod\n def GetResourceNamesWithProjectSettings(cls, projectID, exportLocation, exportFileName, getSubmittedOnly = True, **kwargs):\n \"\"\"\n Queries DB for enabled languages and returns list of files that ExportWithProjectSettings\n is expected to generate.\n NOTE: inherited from LocalizationExporterBase\n parameters:\n projectID - ID of specific project to select data for. This identifies what content will be exported.\n exportLocation - location of folder under which the resource xml file is created\n exp: \"root:/tools/launcher/\"\n exportFileName - name of the language resource file to write.\n exp: \"localization\"\n getSubmittedOnly - flag to indicate if need to write submitted only BSD entries.\n If True is passed, only submitted BSD entries are written into\n pickles. 
Otherwise, latest submitted and unsubmitted BSD \n entries are written into pickles.\n returns:\n list of new file paths\n \"\"\"\n return [os.path.join(exportLocation, exportFileName + cls.FILE_EXT)]\n","sub_path":"eve-8.51.857815/localizationBSD/exporters/localizationXMLResourceExporter.py","file_name":"localizationXMLResourceExporter.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584851024","text":"import sys\nsys.path.insert(0, '/home/mrfreedeer/aima-python')\nfrom search import *\n\n\nclass MissionariesCannibals(Problem):\n def __init__(self,initial, goal):\n Problem.__init__(self,initial,goal)\n self.state = initial\n def actions(self,state):\n if state[2] == 1:\n if state[0] == 3:\n if state[1] == 3:\n return [\"MC\", \"CC\"]\n else:\n return [\"MC\"]\n elif state[0] == 2:\n if state[1] == 2:\n return [\"MC\", \"CC\"]\n else:\n return [\"MC\"]\n else:\n return [\"MC\"]\n else:\n if state[0] == 3:\n return [\"C\", \"CC\"]\n elif state[0] == 2:\n return [\"MC\", \"M\"]\n else:\n return [\"MC\"]\n def result(self,state,action):\n if state == [3,3,1]:\n if action == \"MC\":\n return [2,2,0]\n else:\n return [3,1,0]\n elif state == [3,2,1]:\n if action == \"MC\":\n return [2,1,0]\n else:\n return [3,0,0]\n elif state == [3,1,1]:\n if action == \"MC\":\n return [2,0,0]\n elif state == [2,2,1]:\n if action == \"MC\":\n return [1,1,0]\n else:\n return [2,0,0]\n elif state == [1,1,1]:\n return [0,0,0]\n elif state == [3,1,0]:\n if action == \"CC\":\n return [3,3,1]\n else:\n return [3,2,1]\n elif state == [3,0,0]:\n if action == \"CC\":\n return [3,2,1]\n else:\n return [3,1,1]\n elif state == [2,2,0]:\n if action == \"MC\":\n return [3,3,1]\n else:\n return [3,2,1]\n elif state == [1,1,0]:\n if action == \"MC\":\n return [2,2,1]\n\n def goal_test(self,state):\n if state == self.goal:\n return True\n else:\n return False\n\ndef main():\n cannibals = MissionariesCannibals([3,3,1], [0,0,0])\n results = breadth_first_tree_search(cannibals)\n print(results)\nif __name__ == '__main__':\n main()\n","sub_path":"Cannibals and Missionaries/cannandmissionaries.py","file_name":"cannandmissionaries.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"184099265","text":"import os\nimport sys\n\nimport pytest\n\nimport mpmath\n\n\ndef pytest_report_header(config):\n print(\"mpmath backend: %s\" % mpmath.libmp.backend.BACKEND)\n print(\"mpmath mp class: %s\" % repr(mpmath.mp))\n print(\"mpmath version: %s\" % mpmath.__version__)\n print(\"Python version: %s\" % sys.version)\n\n\ndef pytest_configure(config):\n config.addinivalue_line('markers', 'slow: marks tests as slow')\n\n\n@pytest.fixture(autouse=True)\ndef reset_mp_globals():\n from mpmath import mp, iv\n mp.dps = 15\n mp.pretty = False\n iv.dps = 15\n iv.pretty = False\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349531434","text":"# Copyright 2021 Adap GmbH. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Splits LEAF generated datasets and creates individual client partitions.\"\"\"\nimport argparse\nimport json\nimport pickle\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple\n\n\ndef check_between_zero_and_one(value: str):\n \"\"\"Tests if value is between 0 an 1.\"\"\"\n fvalue = float(value)\n if fvalue < 0 or fvalue > 1:\n raise argparse.ArgumentTypeError(\n f\"\"\"Invalid partition fraction {fvalue}. This must be between [0,1].\"\"\"\n )\n return fvalue\n\n\ndef save_partition(save_root: Path, user_idx: int, dataset: str, data: Dict[str, str]):\n \"\"\"Saves partition for specific client.\n\n Args:\n save_root (Path): Root folder where to save partition\n user_idx (int): User ID\n dataset (str): Dataset {train, validation, test}\n data (Dict[str,str]): Dataset {train, validation, test}\n \"\"\"\n save_dir = save_root / str(user_idx)\n save_dir.mkdir(parents=True, exist_ok=True)\n with open(save_dir / f\"{dataset}.pickle\", \"wb\") as save_file:\n pickle.dump(data, save_file)\n\n\ndef process_user(\n json_file: Dict[str, Any],\n user_idx: str,\n user_str: str,\n list_datasets: List[Tuple[str, float]],\n save_root: Path,\n):\n \"\"\"Creates and saves partition for user.\n\n Args:\n json_file (Dict[str, Any]): JSON file containing user data\n user_idx (str): User ID (counter) in string format\n user_str (str): Original User ID\n list_datasets (List[Tuple[str, float]]): List of datasets and relative fractions\n save_root (Path): Root folder where to save the partition\n \"\"\"\n sentence = json_file[\"user_data\"][user_str][\"x\"]\n next_char = json_file[\"user_data\"][user_str][\"y\"]\n start_idx = 0\n\n for split_id, (dataset, fraction) in enumerate(list_datasets):\n end_idx = start_idx + int(fraction * len(sentence))\n if split_id == len(list_datasets) - 1: # Make sure we use last indices\n end_idx = len(sentence)\n data = {}\n data[\"idx\"] = user_idx\n data[\"character\"] = user_str\n data[\"x\"] = sentence[start_idx:end_idx]\n data[\"y\"] = next_char[start_idx:end_idx]\n start_idx = end_idx\n\n save_partition(save_root, user_idx, dataset, data)\n\n\ndef split_json_and_save(\n list_datasets: List[Tuple[str, float]],\n path_to_json: Path,\n save_root: Path,\n prev_users_list: Optional[List[str]] = None,\n):\n \"\"\"Splits LEAF generated datasets and creates individual client partitions.\n\n Args:\n list_datasets (List[Tuple[str, float]]): list containting dataset tags\n and fraction of dataset split.\n path_to_json (Path): Path to LEAF JSON file containing dataset.\n save_root (Path): Root directory where to save the individual client\n partition files.\n \"\"\"\n users_list: List[str] = []\n new_users: List[str] = []\n with open(path_to_json) as open_file:\n json_file = json.load(open_file)\n if not prev_users_list:\n users_list = json_file[\"users\"]\n else:\n print(\"Using previous list of 
users.\")\n users_list = prev_users_list\n\n for user_idx, user_str in enumerate(users_list):\n new_users.append(user_str)\n process_user(json_file, user_idx, user_str, list_datasets, save_root)\n\n return new_users\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"\"\"Splits a LEAF Shakespeare train dataset into\n train/validation for each client and saves the clients'\n train/val/test dataset in their respective folder.\"\"\"\n )\n parser.add_argument(\n \"--save_root\",\n type=str,\n required=True,\n help=\"\"\"Root folder where partitions will be save as\n {save_root}/client_id/{train,val,test}.pickle\"\"\",\n )\n parser.add_argument(\n \"--leaf_train_json\",\n type=str,\n required=True,\n help=\"\"\"Complete path to JSON file containing the generated\n trainset for LEAF Shakespeare.\"\"\",\n )\n parser.add_argument(\n \"--val_frac\",\n type=check_between_zero_and_one,\n required=True,\n default=0.2,\n help=\"Fraction of original trainset that will be used for validation.\",\n )\n parser.add_argument(\n \"--leaf_test_json\",\n type=str,\n required=True,\n help=\"\"\"Complete path to JSON file containing the generated\n *testset* for LEAF Shakespeare.\"\"\",\n )\n\n args = parser.parse_args()\n\n # Split train dataset into train and validation\n # then save files for each client\n original_train_dataset = Path(args.leaf_train_json)\n train_frac = 1.0 - args.val_frac\n train_val_datasets = [(\"train\", train_frac), (\"val\", args.val_frac)]\n existing_users = split_json_and_save(\n list_datasets=train_val_datasets,\n path_to_json=original_train_dataset,\n save_root=Path(args.save_root),\n )\n\n # Split and save the test files\n original_test_dataset = Path(args.leaf_test_json)\n test_dataset = [(\"test\", 1.0)]\n split_json_and_save(\n list_datasets=test_dataset,\n path_to_json=original_test_dataset,\n save_root=Path(args.save_root),\n prev_users_list=existing_users,\n )\n","sub_path":"baselines/flwr_baselines/scripts/leaf/shakespeare/split_json_data.py","file_name":"split_json_data.py","file_ext":"py","file_size_in_byte":5938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"214655193","text":"def findingMid(n, x):\n if n % 2 == 0:\n mid = n // 2\n return (x[mid] + x[mid - 1]) / 2\n else:\n mid = n // 2\n return x[mid]\n\n\nN = int(input())\ndata = list(map(int, input().split()))\nF = list(map(int, input().split()))\nX = []\nfor i in range(N):\n for j in range(F[i]):\n X.append(data[i])\nX.sort()\nprint(X)\nif len(X) % 2 == 0:\n data1 = X[:len(X) // 2]\n data2 = X[len(X) // 2:]\nelse:\n data1 = X[:len(X) // 2]\n data2 = X[(len(X) // 2) + 1:]\nprint(data1,data2)\nq1 = findingMid(len(data1), data1)\nq3 = findingMid(len(data2), data2)\n\nprint(\"{0:.1f}\".format(q3-q1))\n","sub_path":"PythonChallenges/quartile.py","file_name":"quartile.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"285141769","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@file: server.py\n@time: 2018/11/5 17:27\n'''\nimport pika\n\ncred = pika.PlainCredentials(username='root',password='123456')\nconn = pika.ConnectionParameters(host='192.168.52.98',credentials=cred)\nconnection = pika.BlockingConnection(conn)\nchannel = connection.channel()\n\nchannel.queue_declare(queue='rpc_queue')\n\ndef fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)\n\ndef 
on_request(ch,method,properties,body):\n # 收到消息转换成int类型\n n=int(body)\n print('fib %s' % n)\n\n # 要处理的任务\n response = fib(n)\n\n # 发布消息,通知到客户端\n ch.basic_publish(exchange='',\n routing_key=properties.reply_to,\n properties=pika.BasicProperties(correlation_id=properties.correlation_id),\n body=str(response))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\nchannel.basic_qos(prefetch_count=1)\nchannel.basic_consume(on_request,\n queue='rpc_queue')\nprint('awaiting Rpc...')\nchannel.start_consuming()\n\n","sub_path":"rabbitmq/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"74170270","text":"import torch\nimport pandas as pd\nimport skimage.io as io\nimport clip\nfrom PIL import Image\nimport pickle\nimport argparse\nfrom tqdm import tqdm, trange\nfrom os.path import join\n# from loguru import logger\n\n\ndef main():\n device = torch.device('cuda:0')\n \n clip_model, preprocess = clip.load('../clip_pretrain/ViT-B-32.pt', device=device, jit=False)\n \n image_path = ''\n df = pd.read_csv('./corpus/ai_challenge_train_caption.csv')\n print(df.head())\n \n base_dir = './corpus/ai_challenger_caption_train_20170902/caption_train_images_20170902/'\n \n image_id2embed = {}\n for img_name in tqdm(list(set(df['image_id'].tolist()))):\n img_path = base_dir + img_name \n try:\n image = io.imread(img_path)\n except:\n continue\n\n image = preprocess(Image.fromarray(image)).unsqueeze(0).to(device)\n with torch.no_grad():\n clip_embed = clip_model.encode_image(image).cpu()\n image_id2embed[img_name] = clip_embed\n \n pickle.dump(image_id2embed, open('ai_val_image_id2embed.pkl', 'wb'))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Image_Caption/Clip_Caption/data/step32_process_image_encode_ai_train.py","file_name":"step32_process_image_encode_ai_train.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"607976692","text":"# List of tuples\n# Each tuple contains a test: the first element are the inputs, the second are the output, and the third is the message in case of an error\n# To test another case, add another tuple\n\ninput_values = [\n (\n # Inputs\n [\"1.5\",\"3\"],\n # Outputs\n [\"Número: \", \"Decimales a mostrar: \", \"Razón áurea: 2.427\"],\n # Error message\n \"Revisa los tipos de dato de tus variables. Revisa el llamado a la librería y revisa la ortografía\"\n ),\n (\n # Inputs\n [\"4.7\",\"5\"],\n # Outputs\n [\"Número: \", \"Decimales a mostrar: \", \"Razón áurea: 7.60476\"],\n # Error message\n \"Revisa los tipos de dato de tus variables. Revisa el llamado a la librería y revisa la ortografía\"\n )\n ]","sub_path":"assignments/17RazonAurea/tests/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"469942808","text":"#! 
/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport win32com.client\n#\n# ExcelApp = win32com.client.Dispatch(\"Excel.Application\")\n# ExcelApp.Visible = True\n#\n# xlsPathName = r\"G:\\github\\games_tool\\test\\q1.xls\"\n#\n# wBook = ExcelApp.Workbooks.Open(xlsPathName)\n# wSheet = wBook.Worksheets(1)\n# wSheet.PageSetup.CenterHeader = ' '\n# wSheet.PageSetup.CenterFooter = ' '\nimport os\nall = []\nfor root, dirs, files in os.walk(r'e:\\tool\\data'):\n # print(os.path.join(root, files))\n if '龙华大浪' in root:\n for file in files:\n print(1)\n all.append(os.path.join(root, file))\n\n# print(123)\nprint(all)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"112766757","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport unittest\n\nclass NewVisitorTest(unittest.TestCase):\n\n\tdef setUp(self):\n\t\tself.browser = webdriver.Firefox()\n\t\tself.browser.implicitly_wait(3)\n\n\tdef tearDown(self):\n\t\tself.browser.quit()\n\t\n\tdef check_for_row_in_list_table(self, row_text):\n\t\ttable = self.browser.find_element_by_id('id_list_table')\n\t\trows = table.find_elements_by_tag_name('tr')\n\t\tself.assertIn(row_text, [row.text for row in rows])\n\n\tdef test_can_start_a_list_and_retrive_it_later(self):\n\t\t#blah blah blah\n\t\t#game list : darkest dungeon\n\t\tself.browser.get('http://localhost:8000')\n\n\t\t#civilization V\n\t\tinputbox = self.browser.find_element_by_id('id_new_item')\n\t\tinputbox.send_keys(Keys.ENTER)\n\t\tself.check_for_row_in_list_table('1: Buy peacock feathers')\n\n\t\tinputbox.send_keys('Use peacock feathers to make a fly')\n\t\tinputbox.send_keys(Keys.ENTER)\n\n\t\tself.check_for_row_in_list_table('1: Buy peacock feathers')\n\t\tself.check_for_row_in_list_table('2: Use peacock feathers to make a fly')\n\n\t\n\t\tself.fail('Finish the test!')\n\n\t\t#dungeon crawl stone soup\n\t\t#and so on..\nif __name__ == '__main__':\n\tunittest.main(warnings='ignore')\n","sub_path":"functional_test.py","file_name":"functional_test.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"144891004","text":"import socket\nimport pickle\n\nznr = input('Enter znr number: ')\nsock = socket.socket()\nserver = ('localhost', 10000)\nsock.connect(server)\nsock.sendto(znr.encode(), server)\nfor key, value in pickle.loads(sock.recv(10000)).items():\n print(str(key) + '..........' 
+ str(value))\n","sub_path":"teradata_user_manager_new/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616152389","text":"# -*- coding:utf-8 -*-\r\nimport os\r\nimport sys\r\n\r\n# 参数:(options,*args)第一个参数固定,剩下的参数可变;\r\n# 需要对执行脚本时传入的参数的数量和有效性进行判断,否则退出脚本\r\n# 判断文件及路径是否存在;写成修饰器是否会简单些?还是直接在函数体里判断比较好?\r\n\r\ndef copy(file,path):\r\n\tnew_file = os.path.join(path,os.path.split(file)[1])\r\n\tif os.path.isfile(file) and os.path.exists(path):\r\n\t\tif os.path.exists(new_file):\r\n\t\t\tprint('目标目录下已存在同名文件,退出程序')\r\n\t\telse:\r\n\t\t\twith open(file,mode='r',encoding='utf-8') as f1,open(new_file,mode='w',encoding='utf-8') as f2:\r\n\t\t\t\tfor line in f1:\r\n\t\t\t\t\tf2.write(line)\r\n\telse:\r\n\t\tprint('源文件或目的路径不存在,退出程序')\r\n\t\texit()\t#此处必须要带exit(),否则mv函数调用时,如果copy函数发现文件或目的路径不存在后再执行os.remove函数就会报错。添加exit(),则不再执行os.remove函数\r\n\r\ndef mv(file,path):\r\n\tcopy(file,path)\r\n\tos.remove(file)\r\n\r\ndef rename(src,dst):\r\n\tif os.path.isfile(src) or os.path.isdir(src):\r\n\t\tos.rename(src,dst)\r\n\telse:\r\n\t\tprint('找不到文件/文件夹')\r\n\r\ndef get_size(src):\r\n\tif os.path.isdir(src):\r\n\t\tsum_size,dirs = 0,[src]\r\n\t\twhile dirs:\r\n\t\t\tsrc = dirs.pop()\r\n\t\t\tdir_lst = os.listdir(src)\r\n\t\t\tfor name in dir_lst:\r\n\t\t\t\tfile_path = os.path.join(src,name)\r\n\t\t\t\tif os.path.isfile(file_path):\r\n\t\t\t\t\tsum_size += os.path.getsize(file_path)\r\n\t\t\t\telse:\r\n\t\t\t\t\tdirs.append(file_path)\r\n\t\t\tprint(sum_size)\r\n\t\t\treturn sum_size\r\n\telif os.path.isfile(src):\r\n\t\tprint(os.path.getsize(src))\r\n\t\treturn os.path.getsize(src)\r\n\telse:\r\n\t\tprint('找不到文件')\r\n\r\ndef auto_tools(*args):\r\n\tif len(iput) == 3 or len(iput) == 4:\r\n\t\tif args[0][1] in ['copy','mv','rename','size']:\r\n\t\t\tparam = args[0][1]\r\n\t\t\tif param == 'copy':\r\n\t\t\t\tcopy(args[0][2],args[0][3])\r\n\t\t\telif param == 'mv':\r\n\t\t\t\tmv(args[0][2],args[0][3])\r\n\t\t\telif param == 'rename':\r\n\t\t\t\trename(args[0][2],args[0][3])\r\n\t\t\telif param == 'size':\r\n\t\t\t\tget_size(args[0][2])\r\n\t\telse:\r\n\t\t\tprint('错误的操作项,退出程序')\r\n\telse:\r\n\t\tprint('参数数目错误,退出')\r\n\r\niput = sys.argv # ['homework301.py','options','src','dst']\r\nauto_tools(iput)\r\n\r\n","sub_path":"classes/day06/homework/homework308.py","file_name":"homework308.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45224397","text":"\"\"\"\nMinimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)\nBSD License\n\"\"\"\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\ndef run(write_to):\n start = time.time()\n data = open('graham.txt', 'r').read() # should be simple plain text file\n chars = list(set(data))\n data_size, vocab_size = len(data), len(chars)\n print('data has %d characters, %d unique.' 
% (data_size, vocab_size))\n char_to_ix = { ch:i for i,ch in enumerate(chars) }\n ix_to_char = { i:ch for i,ch in enumerate(chars) }\n\n # hyper-parameters\n hidden_size = 50 # size of hidden layer of neurons\n seq_length = 20 # number of steps to unroll the RNN for\n batch_size = 20\n learning_rate = 1e-1\n n_iter = 5000\n iter_step = 100\n\n torch.manual_seed(1)\n\n def lineToTensor(line):\n tensor = torch.zeros(seq_length, batch_size, vocab_size)\n for i in range(seq_length):\n for j in range(batch_size):\n tensor[i][j][char_to_ix[line[j * seq_length + i]]] = 1\n return tensor\n\n def lineToLongTensor(line):\n tensor = torch.LongTensor(seq_length, batch_size).zero_()\n for i in range(seq_length):\n for j in range(batch_size):\n tensor[i][j] = char_to_ix[line[j * seq_length + i]]\n return tensor\n\n class RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(RNN, self).__init__()\n\n self.hidden_size = hidden_size\n\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n self.i2o = nn.Linear(hidden_size, output_size)\n\n def forward(self, input, hidden):\n combined = torch.cat((input, hidden), 1)\n hidden = F.tanh(self.i2h(combined))\n output = self.i2o(hidden)\n return output, hidden\n\n def initHidden(self):\n return Variable(torch.zeros(batch_size, self.hidden_size))\n\n rnn = RNN(vocab_size, hidden_size, vocab_size)\n optimizer = torch.optim.Adagrad(rnn.parameters(), lr = learning_rate)\n criterion = nn.CrossEntropyLoss()\n\n def train(output_tensor, input_tensor):\n hidden = rnn.initHidden()\n\n optimizer.zero_grad()\n\n loss = 0\n\n for i in range(input_tensor.size()[0]):\n output, hidden = rnn(input_tensor[i], hidden)\n loss += criterion(output, output_tensor[i])\n\n loss.backward()\n\n # grad clipping and stepping\n torch.nn.utils.clip_grad_norm(rnn.parameters(), 5.0, norm_type=1)\n optimizer.step()\n\n return loss.data[0]\n\n end = time.time()\n prepareTime = end-start\n\n loss_save = []\n p = -seq_length * batch_size\n start = time.time()\n for iter in range(n_iter + 1):\n p += seq_length * batch_size\n if p+seq_length * batch_size+1 >= len(data): p = 0\n\n inputs = Variable(lineToTensor(data[p:p+seq_length * batch_size]))\n targets = Variable(lineToLongTensor(data[p+1:p+seq_length * batch_size +1]))\n loss = train(targets, inputs)\n if iter % iter_step == 0:\n print('iter %d, loss: %f' % (iter, loss))\n loss_save.append(loss)\n\n end = time.time()\n loopTime = end -start\n\n with open(write_to, \"w\") as f:\n f.write(\"unit: \" + \"100 iteration\\n\")\n for loss in loss_save:\n f.write(str(loss) + \"\\n\")\n f.write(\"run time: \" + str(prepareTime) + \" \" + str(loopTime) + \"\\n\")\n\nif __name__ == '__main__':\n import sys\n if (len(sys.argv) != 2):\n print(\"should have a file to write results to\")\n exit(0)\n run(sys.argv[1])","sub_path":"src/out/ICFP18evaluation/evaluationRNN/min-char-rnn-pytorch.py","file_name":"min-char-rnn-pytorch.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"141074868","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('boilerplate_draj', '0009_auto_20170108_1218'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='sample',\n name='owner',\n field=models.ForeignKey(related_name='userstatus_sample', blank=True, 
to=settings.AUTH_USER_MODEL, null=True),\n ),\n migrations.AlterField(\n model_name='userstatus',\n name='owner',\n field=models.ForeignKey(related_name='userstatus_owner', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='userstatus',\n name='user',\n field=models.OneToOneField(related_name='userstatus_user', to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"api-service/boilerplate_draj/migrations/0010_auto_20170108_1233.py","file_name":"0010_auto_20170108_1233.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316193443","text":"#!/usr/bin/env python3\n\ndef summation_of_primes(n):\n \"\"\"\n Returns the summation of primes below n\n \"\"\"\n primes = []\n primes.append(2)\n num = 3\n \n ## find primes below n\n while (primes[len(primes) - 1] < n):\n if is_prime(num):\n primes.append(num)\n num += 1\n \n # add list of primes below n \n total = 0\n for i in range(len(primes) - 1):\n total += primes[i]\n \n return total \n \n \ndef is_prime(n):\n if n < 2:\n return False\n if n == 2: \n return True \n if not n & 1: \n return False\n for x in range(3, int(n**0.5) + 1, 2):\n if n % x == 0:\n return False\n return True\n\n\nif __name__ == '__main__': \n print(summation_of_primes(2000000))","sub_path":"problem10.py","file_name":"problem10.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"50875180","text":"from django.shortcuts import render,redirect\nfrom .forms import KioskForm,KioskOwnerForm\n\n# Create your views here.\n\ndef upload_kiosk(request):\n if request.method == 'POST':\n form = KioskForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('upload-product') \n else:\n form = KioskForm\n return render(request,'upload_kiosk.html',{'form':form})\n \n\ndef upload_owner(request):\n if request.method == \"POST\":\n form = KioskOwnerForm(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n return redirect('product_list.html') \n else:\n form = KioskOwnerForm\n return render(request,'upload_kioskOwner.html',{'form':form})\n \n\n \n \n \n \n \n \n ","sub_path":"kiosks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"1155851","text":"# -*- coding: utf-8 -*-\n\n\"\"\"cap base Invenio configuration.\"\"\"\n\nfrom __future__ import absolute_import, print_function\nimport os\nimport copy\nfrom invenio_oauthclient.contrib import cern\n\n\n# Identity function for string extraction\ndef _(x):\n return x\n\nDEBUG = True\n\n# Default language and timezone\nBABEL_DEFAULT_LANGUAGE = 'en'\nBABEL_DEFAULT_TIMEZONE = 'Europe/Zurich'\nI18N_LANGUAGES = [\n]\n\nBASE_TEMPLATE = \"invenio_theme/page.html\"\nCOVER_TEMPLATE = \"invenio_theme/page_cover.html\"\nSETTINGS_TEMPLATE = 'cap_theme/settings/base.html'\n\n# WARNING: Do not share the secret key - especially do not commit it to\n# version control.\nSECRET_KEY = \"changeme\"\n\n# Theme\nTHEME_SITENAME = _(\"CERN Analysis Preservation\")\nTHEME_LOGO = \"img/cap_logo_lrg.svg\"\nTHEME_GOOGLE_SITE_VERIFICATION = []\n\nREQUIREJS_CONFIG = 'js/cap-build.js'\n\nRECORDS_UI_BASE_TEMPLATE = 'records/detail.html'\nRECORDS_UI_TOMBSTONE_TEMPLATE = 'records/detail.html'\n\n# Records configuration\nRECORDS_UI_DEFAULT_PERMISSION_FACTORY = 
\"cap.modules.theme.permissions:read_permission_factory\"\n\nRECORDS_UI_ENDPOINTS = dict(\n recid=dict(\n pid_type='recid',\n route='/records/',\n template='records/detail.html',\n ),\n)\n\nEMAIL_REGEX = '[^@]+@[^@]+\\.[^@]+'\n\nCAP_COLLAB_EGROUPS = dict(\n CMS=dict(\n collaboration_cms=[\n \"cms-members\"\n ]\n ),\n ALICE=dict(\n collaboration_alice=[\n \"alice-member\"\n ]\n ),\n ATLAS=dict(\n collaboration_atlas=[\n \"atlas-active-members-all\"\n ]\n ),\n LHCb=dict(\n collaboration_lhcb=[\n \"lhcb-general\"\n ]\n )\n)\n\nACCOUNTS_REGISTER_BLUEPRINT = False\nSECURITY_POST_CHANGE_VIEW = False\nSECURITY_RECOVERABLE = False\nSECURITY_REGISTERABLE = False\nSECURITY_CHANGEABLE = False\nOAUTHCLIENT_LOGIN_USER_TEMPLATE = 'access/login_user.html'\n\nRECORDS_REST_ENDPOINTS = dict(\n recid=dict(\n pid_type='recid',\n pid_minter='cap_record_minter',\n pid_fetcher='cap_record_fetcher',\n search_index='_all',\n search_type=None,\n record_serializers={\n 'application/json': ('cap.modules.records.serializers'\n ':json_v1_response'),\n },\n search_serializers={\n 'application/json': ('cap.modules.records.serializers'\n ':json_v1_search'),\n },\n list_route='/records/',\n item_route='/records/',\n default_media_type='application/json',\n ),\n)\n\n# SearchUI API endpoint.\nSEARCH_UI_SEARCH_API = '/api/records/'\n\n# Database\n# SQLALCHEMY_DATABASE_URI = \"postgresql+psycopg2://localhost/cap\"\n\n# ElasticSearch\nSEARCH_ELASTIC_HOSTS = [\"localhost:9200\"]\n\n# Path to where JSON metadata exist\nJSON_METADATA_PATH = \"/_metadata\"\n\n# Mail\nMAIL_SUPPRESS_SEND = True\n\n# OAuth configuration\nCERN_APP_CREDENTIALS = {\n 'consumer_key': os.environ.get('CERN_APP_CREDENTIALS_KEY'),\n 'consumer_secret': os.environ.get('CERN_APP_CREDENTIALS_SECRET'),\n}\n\nCERN_REMOTE_APP = copy.deepcopy(cern.REMOTE_APP)\nCERN_REMOTE_APP[\"params\"].update({\n 'request_token_params': {\n \"scope\": \"Email Groups\",\n }\n})\nOAUTHCLIENT_REMOTE_APPS = {'cern': CERN_REMOTE_APP}\nSECURITY_SEND_REGISTER_EMAIL = False\n\nBASE_TEMPLATE = \"cap_theme/page.html\"\nSECURITY_LOGIN_USER_TEMPLATE = \"access/login_user.html\"\n# config.setdefault(\n# 'COVER_TEMPLATE', 'invenio_theme/page_cover.html')\n# config.setdefault(\n# 'SETTINGS_TEMPLATE', 'invenio_theme/page_settings.html')\n# config.setdefault(\n# 'THEME_BASE_TEMPLATE', config['BASE_TEMPLATE'])\n# config.setdefault(\n# 'THEME_COVER_TEMPLATE', config['COVER_TEMPLATE'])\n# config.setdefault(\n# 'THEME_SETTINGS_TEMPLATE', config['SETTINGS_TEMPLATE'])\n# config.setdefault(\n# 'THEME_ERROR_TEMPLATE', 'invenio_theme/error.html')\n# config.setdefault(\n# 'THEME_401_TEMPLATE', 'invenio_theme/401.html')\n# config.setdefault(\n# 'THEME_403_TEMPLATE', 'invenio_theme/403.html')\n# config.setdefault(\n# 'THEME_404_TEMPLATE', 'invenio_theme/404.html')\n# config.setdefault(\n# 'THEME_500_TEMPLATE', 'invenio_theme/500.html')\n\nSEARCH_QUERY_ENHANCERS = [\n 'cap.modules.access.ext:authenticated_query'\n]\n\nJSONSCHEMAS_HOST = 'https://localhost:5000'\n\nENABLE_SUPERPOWERS_FOR_EVERYONE = False\n\"\"\"Enable all the users to perform all the actions.\"\"\"\n","sub_path":"cap/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"521805688","text":"#!/usr/bin/python\n\nfrom unittest.mock import Mock, patch\n\nimport pygame\nimport pygame.freetype\nimport pytest\n\nfrom yendor import (\n coord,\n monster,\n )\n\n\n# FIXME: de-dupe with test_dungeon.py\n@pytest.fixture\ndef 
fontinit():\n pygame.init()\n pygame.freetype.init()\n\n\nmult = 32\n\n\ndef gc_to_cc(gc):\n return coord.Coord(gc.x * 32, gc.y * 32)\n\n\ndef cc_to_gc(cc):\n return coord.Coord(int(cc.x / 32), int(cc.y / 32))\n\n\ndef test_monster_update(fontinit):\n start = coord.Coord(3, 0)\n start_cc = gc_to_cc(start)\n end = start.south().south()\n g = pygame.sprite.Group()\n\n grid = Mock()\n grid.grid_coord_to_client.side_effect = gc_to_cc\n grid.client_coord_to_grid.side_effect = cc_to_gc\n grid.path.return_value = [start, start.south()]\n\n m = monster.Lizard(start, end, grid)\n g.add(m)\n assert m.alive()\n assert m.rect.x == start_cc.x\n assert m.rect.y == start_cc.y\n assert m.velocity.direction == coord.SOUTH\n assert m.velocity.xVelocity < 1e-10\n\n # Update should move monster south by 30px.\n expected_y = 30\n ms_per_px = 1000.0 / m.speed\n m.update(ms_per_px * expected_y)\n assert m.coord.x == start_cc.x\n assert abs(m.coord.y - (start_cc.y + expected_y)) < 0.0001\n\n # Move the last 2px which will move to the base and the monster\n # will no longer be active.\n m.update(ms_per_px * 2)\n assert not m.alive()\n\n\ndef test_monster_injure(fontinit):\n start = coord.Coord(0, 0)\n end = start.south()\n g = pygame.sprite.Group()\n\n grid = Mock()\n grid.grid_coord_to_client.side_effect = gc_to_cc\n grid.client_coord_to_grid.side_effect = cc_to_gc\n grid.path.return_value = [start, start.south()]\n\n with patch.object(monster.dice, 'roll') as roll:\n roll.return_value = 18\n m = monster.Orc(start, end, grid)\n g.add(m)\n assert m.alive()\n\n assert m.health == 18\n m.injure(12)\n assert m.health == 6\n assert m.alive()\n m.injure(6)\n assert m.health == 0\n assert not m.alive()\n\n\ndef test_monster_status_message(fontinit):\n start = coord.Coord(0, 0)\n end = start.south()\n\n grid = Mock()\n grid.grid_coord_to_client.side_effect = gc_to_cc\n grid.client_coord_to_grid.side_effect = cc_to_gc\n grid.path.return_value = [start, start.south()]\n\n m = monster.Orc(start, end, grid)\n m.health = 1234\n gs = Mock()\n gs.grid = Mock()\n gs.grid.client_coord_to_grid.return_value = \"XYZ\"\n msg = m.status_message(gs)\n assert \"Orc @ XYZ\" in msg\n assert \"health: 1234\" in msg\n\n\ndef test_monster_has_money_prob(fontinit):\n start = coord.Coord(0, 0)\n end = start.south()\n\n grid = Mock()\n grid.grid_coord_to_client.side_effect = gc_to_cc\n grid.client_coord_to_grid.side_effect = cc_to_gc\n\n def path(*args, **kwargs):\n return [start, start.south()]\n\n grid.path.side_effect = path\n\n ms = [monster.Orc(start, end, grid) for _ in range(1000)]\n monied = [m for m in ms if m.money > 0]\n\n # Expect 2/5 to have money, or about 400. (In about a half-dozen\n # trials this range worked; it's likely that there will be a false\n # failure at some point, but most trials should fall in this\n # range. 
I wrote this test because I was suspicious after playing\n # that not enough monsters were given gold based on the 2/5 rule.)\n nmonied = len(monied)\n assert nmonied > 360 and nmonied < 440\n","sub_path":"tests/test_monster.py","file_name":"test_monster.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"390282231","text":"'''Leetcode 1290: Easy \nCategory: Linked Lists, Base Conversion\nEstimated Time Taken: 15 minutes\nConvert Binary Number in Linked List to Integer\n'''\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def getDecimalValue(self, head: ListNode) -> int:\n bin_list = []\n #continue to append current node's value to a list\n if not head:\n return 0\n else:\n while head:\n bin_list.append(head.val)\n head = head.next\n\n #convert the binary number (stored as a list) into a decimal num\n dec_num = 0\n multiplier = 1\n for i in range (len(bin_list) - 1, -1, -1):\n # print(i)\n dec_num += bin_list[i] * multiplier\n # print(f\"dec num: {dec_num}\")\n multiplier *= 2\n return dec_num","sub_path":"daily_DS_practice/leetcode_1290.py","file_name":"leetcode_1290.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"210068164","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom recipe_engine.types import freeze\n\nDEPS = [\n 'depot_tools/bot_update',\n 'chromium',\n 'file',\n 'depot_tools/gsutil',\n 'recipe_engine/path',\n 'recipe_engine/platform',\n 'recipe_engine/properties',\n 'recipe_engine/python',\n 'recipe_engine/step',\n]\n\n\nBUILDERS = freeze({\n 'tryserver.chromium.linux': {\n 'builders': {\n 'linux_upload_clang': {\n 'chromium_config_kwargs': {\n 'BUILD_CONFIG': 'Release',\n 'TARGET_PLATFORM': 'linux',\n 'TARGET_BITS': 64,\n },\n\n # We need this to build the Clang toolchain\n # with proper AddressSanitizer prebuilts for\n # Chrome on Android.\n 'gclient_apply_config': ['android'],\n },\n },\n },\n 'tryserver.chromium.mac': {\n 'builders': {\n 'mac_upload_clang': {\n 'chromium_config_kwargs': {\n 'BUILD_CONFIG': 'Release',\n 'TARGET_PLATFORM': 'mac',\n 'TARGET_BITS': 64,\n },\n },\n },\n },\n 'tryserver.chromium.win': {\n 'builders': {\n 'win_upload_clang': {\n 'chromium_config_kwargs': {\n 'BUILD_CONFIG': 'Release',\n 'TARGET_PLATFORM': 'win',\n 'TARGET_BITS': 32,\n },\n },\n },\n },\n})\n\n\ndef RunSteps(api):\n _, bot_config = api.chromium.configure_bot(BUILDERS)\n\n api.bot_update.ensure_checkout(\n patch_root=bot_config.get('root_override'))\n\n api.python('update win toolchain',\n api.path['checkout'].join('build', 'vs_toolchain.py'), ['update'])\n api.python('update mac toolchain',\n api.path['checkout'].join('build', 'mac_toolchain.py'))\n api.python('download binutils',\n api.path['checkout'].join('third_party', 'binutils', 'download.py'))\n\n api.python(\n 'package clang',\n api.path['checkout'].join('tools', 'clang', 'scripts', 'package.py'),\n args=['--upload'])\n\n\ndef GenTests(api):\n for test in api.chromium.gen_tests_for_builders(BUILDERS):\n yield 
test\n","sub_path":"scripts/slave/recipes/chromium_upload_clang.py","file_name":"chromium_upload_clang.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176867857","text":"#!/usr/bin/env python\nimport os,json,time,math,types\nfrom subprocess import Popen, PIPE\nimport matplotlib.pyplot as plt\nfrom numpy import var,mean\nfrom graph_lib.graph_generator import Gen\nfrom random import Random\nimport networkx as nx\nr=Random(1234)\n\n\ndef gen_graph(N):\n ge = Gen()\n ge.genGraph(\"PLAW\", N)\n graph = ge.graph\n graph2=nx.Graph()\n for e in graph.edges():\n graph2.add_edge(e[0],e[1],weight=r.uniform(0,10))\n graph=graph2\n netjson = ge.composeNetJson(graph)\n #print(netjson)\n json_netjson = json.dumps(netjson)\n text_file = open(\"input.json\", \"w+\")\n text_file.write(json_netjson)\n text_file.close()\n return graph\n\ndef time_exe(is_c,heu):\n heu=str(heu)\n exe=\"./c++.out \"\n if is_c:\n exe=\"./c.out\"\n heu=\" \"+heu+\" 0\"\n start = time.time()\n #print(exe+heu)\n p = Popen(exe+heu,shell=True,stdout=PIPE,stderr=PIPE)\n #os.wait4(p.pid, 0)\n out, err = p.communicate()\n elapsed = time.time() - start\n title=\"\"\n if is_c:\n title+=\"C \"\n else:\n title+=\"C++ \"\n if heu:\n title+=\" with heu\"\n else:\n title+=\" without heu\"\n if out:\n #print(out)\n out=eval(out)\n global rounding\n out= {k:round(v,rounding) for k,v in out.iteritems()}\n #out= {k:v for k,v in out.iteritems()}\n return elapsed,out\n\n\n\nrounding=2\n#start,end,jump=100,1800+1,100\nstart,end,jump=200,2000+1,100\nrepetitions=10\nmax=int(math.ceil(float(end-start)/jump))*repetitions\n\nres={}\nres[\"x\"]=[]\nres[\"c_var\"]=[]\nres[\"c_mean\"]=[]\nres[\"c_eu_var\"]=[]\nres[\"c_eu_mean\"]=[]\nres[\"c++_var\"]=[]\nres[\"c++_mean\"]=[]\nres[\"c++_eu_var\"]=[]\nres[\"c++_eu_mean\"]=[]\nindex=1\nfor i in range(start,end,jump):\n #g=nx.read_weighted_edgelist()\n c_eu=[]\n c=[]\n cpp_eu=[]\n cpp=[]\n for j in xrange(repetitions):\n g=nx.read_weighted_edgelist(\"data/\"+str(i)+\"/\"+str(j))\n netjson = Gen().composeNetJson(g)\n #print(netjson)\n json_netjson = json.dumps(netjson)\n text_file = open(\"input.json\", \"w+\")\n text_file.write(json_netjson)\n text_file.close()\n\n print(str(round(100*float(index)/max,2))+\"%\")\n index+=1\n timer,val1=time_exe(1,0)\n c.append(timer)\n timer,val2=time_exe(1,1)\n c_eu.append(timer)\n timer,val3=time_exe(0,0)\n cpp.append(timer)\n timer,val4=time_exe(0,1)\n cpp_eu.append(timer)\n #print(val1)\n actual_res=nx.betweenness_centrality(g,endpoints=True,weight='weight')\n actual_res= {k:round(v,rounding) for k,v in actual_res.iteritems()}\n #actual_res= {k:v for k,v in actual_res.iteritems()}\n #print(actual_res)\n continue\n if (not (actual_res==val1 and val1==val2)):\n #print({k:(v-val1[k],v,val1[k]) for k,v in actual_res.iteritems() if v-val1[k]!=0})\n #print({k:(v-val2[k],v,val2[k]) for k,v in actual_res.iteritems() if v-val2[k]!=0})\n print(\"val1\",val1)\n print(\"val2\",val2)\n print(\"actual_res\",actual_res)\n #print(\"1\",val1)\n #print(\"2\",val2)\n #print(\"3\",val3)\n #print(\"4\",val4)\n for e in g.edges(data='weight'):\n print('add_edge_graph(&g1,\"'+str(e[0])+'\",\"'+str(e[1])+'\",'+str(e[2])+',0);')\n import sys\n sys.exit(0)\n\n res[\"c_var\"].append(var(c))\n res[\"c_mean\"].append(mean(c))\n res[\"c_eu_var\"].append(var(c_eu))\n res[\"c_eu_mean\"].append(mean(c_eu))\n res[\"c++_var\"].append(var(cpp))\n res[\"c++_mean\"].append(mean(cpp))\n 
res[\"c++_eu_var\"].append(var(cpp_eu))\n res[\"c++_eu_mean\"].append(mean(cpp_eu))\n res[\"x\"].append(i)\nprint(res)\nexit(0)\nplt.errorbar(res[\"x\"], res[\"c++_mean\"], yerr=res[\"c++_var\"], label=\"C++ w/o h\")\nplt.errorbar(res[\"x\"], res[\"c++_eu_mean\"], yerr=res[\"c++_eu_var\"], label=\"C++ w h\")\nplt.errorbar(res[\"x\"], res[\"c_mean\"], yerr=res[\"c_var\"], label=\"C w/o h\")\nplt.errorbar(res[\"x\"], res[\"c_eu_mean\"], yerr=res[\"c_eu_var\"], label=\"C w h\")\nplt.xlabel('size of graph (nodes)')\nplt.ylabel('execution time (s)')\nplt.legend(loc='upper center', shadow=True)\n#plt.axhline(1,color='k')\nfor var in (res[\"c_mean\"], res[\"c_eu_mean\"],res[\"c++_mean\"],res[\"c++_eu_mean\"]):\n plt.annotate('%0.2f' % var[-1], xy=(1, var[-1]), xytext=(8, 0),\n xycoords=('axes fraction', 'data'), textcoords='offset points')\nplt.savefig('res.png')\nplt.show()\n","sub_path":"prince/src/test_routines/test_exe.py","file_name":"test_exe.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415854973","text":"import os\n\nfrom flask import Flask\n\n\ndef create_app(test_config=None):\n # Create and configure the app\n app = Flask(__name__, instance_relative_config=True, static_url_path='/static')\n app.config.from_mapping(\n PORT=5000\n )\n\n @app.context_processor\n def context_processor():\n\n site_title = \"Simon's Blog\"\n title_logo_img_src = \"static/images/title-logo.jpeg\"\n\n email_icon = {\n \"img_src\": \"static/images/email-logo.png\",\n \"alt_txt\": \"email-logo\",\n \"href\": \"mailto:simonbuusjensen@hotmail.com\"\n }\n\n github_icon = {\n \"img_src\": \"static/images/github-logo.png\",\n \"alt_txt\": \"github-logo\",\n \"href\": \"https://github.com/SimonBuusJensen\",\n }\n\n linkedin_icon = {\n \"img_src\": \"static/images/linkedin-logo.png\",\n \"alt_txt\": \"linkedin-logo\",\n \"href\": \"https://www.linkedin.com/in/simon-buus-jensen-1ba307a4/\"\n }\n logo_imgs = [email_icon, github_icon, linkedin_icon]\n\n return dict(site_title=site_title, title_logo_img_src=title_logo_img_src, logo_imgs=logo_imgs)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n from . import posts\n app.register_blueprint(posts.bp)\n app.add_url_rule('/', endpoint='index')\n app.add_url_rule('/2019-06-05-grundlovsdag', endpoint='2019-06-05-grundlovsdag')\n\n from . import about\n app.register_blueprint(about.bp)\n app.add_url_rule('/about', endpoint='about')\n\n from . import books\n app.register_blueprint(books.bp)\n app.add_url_rule(\"/books\", endpoint='books')\n\n return app\n","sub_path":"flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"387856272","text":"import logging\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import (\n JsonResponse,\n HttpResponseBadRequest,\n HttpResponseNotAllowed,\n)\n\nfrom . 
import (\n transactions,\n forms,\n models,\n)\n\n\nLOGGER = logging.getLogger(__name__)\n\n\n@csrf_exempt\ndef deposit_package(request):\n if request.method != 'POST':\n return HttpResponseNotAllowed(['POST'])\n\n form = forms.DepositForm(request.POST, request.FILES)\n if form.is_valid():\n try:\n deposit_id = transactions.deposit_package(\n form.cleaned_data['package'],\n form.cleaned_data['md5_sum'],\n form.cleaned_data['depositor']\n )\n\n except transactions.ChecksumError as exc:\n LOGGER.exception(exc)\n return HttpResponseBadRequest()\n\n return JsonResponse({'deposit_id': deposit_id})\n\n else:\n return HttpResponseBadRequest()\n","sub_path":"frontdesk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570938802","text":"\nimport os\n# 数量>500的关系抽取出来\nremains={'P131','P22','P7','P17','P27','P150','P19','P47','P69','P20'}\n\ndef extract_data(ori_dir,new_dir,name='train'):\n ori_path=os.path.join(ori_dir,name+\"_zh.txt\")\n new_path=os.path.join(new_dir,name+\"_zh.txt\")\n fn=open(new_path,mode='w',encoding='utf-8')\n if name=='train':\n na=100000\n else:\n na=10000\n with open(ori_path,encoding='utf-8') as f:\n for line in f:\n if line.split()[4]=='NA' and na>=0:\n fn.write(line)\n na-=1\n elif line.split()[4] in remains:\n fn.write(line)\n\n\n fn.close()\n\ndef data_ct(dir,name='train'):\n new_path=os.path.join(dir,name+\"_zh.txt\")\n res=0\n with open(new_path,mode='r') as f:\n res=len(f.readlines())\n print('{} count: {}'.format(name,res))\nif __name__=='__main__':\n ori_dir='/home/lnn/Documents/OpenNRE-Ina/OpenNRE-PyTorch/mnre_data'\n # new_dir='/home/lnn/Documents/OpenNRE-Ina/OpenNRE-PyTorch/mnre_data/thesis_data/raw_data'\n new_dir='/home/nana/Documents/pycharmforlinux/opennre-pytorch/mnre_data/thesis_data/raw_data'\n extract_data(ori_dir,new_dir,name='train')\n # data_ct(new_dir,name='valid')\n # data_ct(new_dir,name='test')\n","sub_path":"ina_process/part_of_data.py","file_name":"part_of_data.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"599659472","text":"# coding: utf-8\r\nimport os\r\nimport datetime\r\n\r\n\r\n# linux의 crontab에 있는 cron_list들 추출\r\ndef get_crontab_list():\r\n cron_list = os.popen('./cron_command.sh list').read().strip().split(\"\\n\")\r\n if cron_list==['']:\r\n return False\r\n data = []\r\n for item in cron_list:\r\n item = item.split()\r\n data.append([item[1], item[2],item[3],item[4],item[5],item[7],item[9]])\r\n return data\r\n \r\n\r\ndef get_crontab_index(file_dir):\r\n crontab_list = get_crontab_list()\r\n if not crontab_list:\r\n return False\r\n for (index, item) in enumerate(crontab_list):\r\n if item[5] == file_dir:\r\n return index+1 # crontab remove 1 - 1 부터시작\r\n return False\r\n \r\n\r\ndef delete_crontab(filename):\r\n index = get_crontab_index(filename)\r\n if index:\r\n os.system(f'./cron_command.sh remove {index}')\r\n return True\r\n return False\r\n\r\n\r\n","sub_path":"cron/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627463653","text":"# coding: utf-8\nimport MeCab\nfrom django.utils import html\n\n\ndef getmecabresult(text):\n text = text.replace(\" \", \"\\n\")\n text = html.strip_tags(text)\n imi_word = {}\n imi_wordlist = []\n with open(\"./taboo_word_checker/bad.tsv\", 
\"r\") as f:\n for imi in f:\n item = imi.strip().split(\"\\t\")\n imi_wordlist.append(item[0])\n imi_word[item[0]] = {\"replace\": item[1], \"reason\": item[2]}\n\n mecab = MeCab.Tagger(\"-Owakati\")\n mecab.parse('')\n\n word_list = text.split(\"\\n\")\n resultlist = []\n count = 1\n print(word_list)\n word_obj = {}\n for word in word_list:\n node = mecab.parseToNode(word)\n while node:\n word = node.surface\n print(word)\n word_obj[\"word\"] = word\n word_obj[\"replace\"] = \"\"\n word_obj[\"reason\"] = \"\"\n word_obj[\"istaboo\"] = False\n for iw in imi_wordlist:\n if word.find(iw) != -1:\n word_obj[\"replace\"] = imi_word[iw][\"replace\"]\n word_obj[\"reason\"] = imi_word[iw][\"reason\"]\n word_obj[\"istaboo\"] = True\n count += 1\n break\n node = node.next\n resultlist.append(word_obj.copy())\n resultlist.append({\"word\": \"\\n\", \"replace\": \"\", \"reason\": \"\"})\n data = {\"resultlist\": resultlist, \"count\": count}\n return data\n","sub_path":"taboo_word_checker/marriage_word_check.py","file_name":"marriage_word_check.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"118998308","text":"\n# A very simple Flask Hello World app for you to get started with...\n\nfrom flask import Flask, request\nimport joblib\nimport pickle\n\napp = Flask(__name__)\napp.debug = True\n\nclf = joblib.load('../covid_predictor.pkl')\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World!'\n\n\n@app.route('/check')\ndef check_form():\n return app.send_static_file('check.html')\n\n\n@app.route('/check_predictor', methods=['POST'])\ndef check_predictor():\n dry_cough = int(request.form['dry_cough'])\n high_fever = int(request.form['high_fever'])\n sore_throat = int(request.form['sore_throat'])\n difficult_breathing = int(request.form['difficult_breathing'])\n prediction = clf.predict([[dry_cough, high_fever, sore_throat, difficult_breathing]])\n probabilities = clf.predict_proba([[dry_cough, high_fever, sore_throat, difficult_breathing]])\n probabiliy = round(float(probabilities[0][1]*100), 2)\n # create the probabiliy message\n probability_message = \"({0} % risque d'infection)\".format(str(probabiliy))\n\n # Return message based on the prediction\n if prediction[0] == 1:\n return \"Vous êtes probablement atteint! \" + probability_message\n else:\n return \"Vous n'êtes probablement pas atteint! 
\" + probability_message\n \n \nif __name__ == '__main__':\n app.run()\n \n","sub_path":"web_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8209215","text":"from pycmp.grammar import Grammar, Sentence, Symbol, Production, NonTerminal\nfrom grammar_analyzer.enhancer.converter import grammar_to_graph, graph_to_grammar\nfrom grammar_analyzer.enhancer.unnecesary_productions import (\n remove_unit_prods,\n remove_unreachable_prods,\n)\n\n\ndef remove_left_recursion(grammar):\n new_grammar = __remove_epsilon_productions(grammar)\n new_grammar = remove_unit_prods(new_grammar)\n\n nonterminals = [t.name for t in new_grammar.nonterminals]\n\n S, d = grammar_to_graph(new_grammar)\n\n for i in range(0, len(nonterminals)):\n for j in range(0, i):\n for sentence in d[nonterminals[i]]:\n if len(sentence) > 0 and sentence[0] == nonterminals[j]:\n d[nonterminals[i]].remove(sentence)\n remove_first = sentence[1:len(sentence)]\n\n for sentence in d[nonterminals[j]]:\n new_sentence = []\n for item in sentence:\n new_sentence.append(item)\n for item in remove_first:\n new_sentence.append(item)\n d[nonterminals[i]].append(new_sentence)\n d = __remove_inmediate_left_recursion(d)\n\n return graph_to_grammar(S, d)\n\n\ndef __remove_inmediate_left_recursion(d: dict):\n new_productions = []\n\n for key, value in d.items():\n recursion = []\n no_recursion = []\n for sentence in value:\n if sentence == []:\n no_recursion.append(sentence)\n elif sentence[0] == key:\n recursion.append(sentence)\n else:\n no_recursion.append(sentence)\n\n if len(recursion) == 0:\n for sentence in no_recursion:\n new_productions.append((key, sentence))\n\n # there's some left recursion\n else:\n X = key + \"'\"\n\n for sentence in no_recursion:\n sentence.append(X)\n new_productions.append((key, sentence))\n\n for sentence in recursion:\n new_sentence = []\n for symb in sentence:\n if symb == key:\n continue\n new_sentence.append(symb)\n new_sentence.append(X)\n new_productions.append((X, new_sentence))\n\n new_productions.append((X, []))\n\n new_d = {}\n\n for p in new_productions:\n try:\n new_d[p[0]].append(p[1])\n except:\n new_d[p[0]] = [p[1]]\n\n return new_d\n\n\ndef __remove_epsilon_productions(grammar):\n S, d = grammar_to_graph(grammar)\n nonterminals = [t.name for t in grammar.nonterminals]\n\n nullable = {}\n nullable = __find_nullable_nonterminals(d, nullable, S, nonterminals)\n\n for key, value in d.items():\n new_value = [v for v in value]\n\n for sentence in value:\n if sentence == []:\n new_value.remove(sentence)\n\n for i in range(0, len(sentence)):\n if sentence[i] in nonterminals and nullable[sentence[i]]:\n new_sentence = sentence[0:i] + sentence[i +\n 1:len(sentence)]\n\n if not new_sentence in new_value:\n new_value.append(new_sentence)\n\n d[key] = new_value\n\n if nullable[S]:\n d[S].append([])\n\n return graph_to_grammar(S, d)\n\n\ndef __find_nullable_nonterminals(d, nullable, symbol, nonterminals):\n\n try:\n _ = nullable[symbol]\n return nullable\n except KeyError:\n nullable[symbol] = False\n\n for Sentence in d[symbol]:\n local_nullable = True\n for symb in Sentence:\n if symb == \"epsilon\":\n break\n elif not symb in nonterminals:\n local_nullable = False\n else:\n nullable = __find_nullable_nonterminals(\n d, nullable, symb, nonterminals)\n local_nullable = local_nullable and nullable[symb]\n\n nullable[symbol] = nullable[symbol] or local_nullable\n\n return 
nullable\n","sub_path":"src/grammar_analyzer/enhancer/left_recursion.py","file_name":"left_recursion.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"444093622","text":"import sys # argv, exit()\n\ndef print_usage_and_exit():\n print('SYNOPSIS')\n print()\n print('{} [-h | --help]'.format(sys.argv[0]))\n print(' print this help message and exit')\n print()\n print('{} ifile ofile'.format(sys.argv[0]))\n print(' 1) read .pcd file ')\n print(' 2) remove every 2 out of 3 points')\n print(' 3) write resulting point cloud to file ofile in pcd format')\n #print(' Parameters xmin, ymin and zmin default to 0')\n print()\n sys.exit()\n\ndef get_args():\n if (len(sys.argv) > 1 and sys.argv[1] in ['-h', '--help']):\n print_usage_and_exit()\n elif len(sys.argv) > 2:\n infilename = sys.argv[1]\n outfilename = sys.argv[2]\n return (infilename, outfilename)\n\ndef read_file(infilename):\n header = []\n pointcloud = []\n with open(infilename, 'r') as f:\n for line in f:\n row = line.split()\n if row[0] in ['VERSION', 'FIELDS', 'SIZE', 'TYPE', 'COUNT', 'WIDTH', 'HEIGHT', 'VIEWPOINT', 'POINTS', 'DATA']:\n header.append(line)\n else:\n (sx,sy,sz,srgba) = row\n (x,y,z,rgba) = (float(sx), float(sy), float(sz), int(srgba))\n pointcloud.append((x,y,z,rgba))\n return header, pointcloud\n\ndef dont_kill_it(i):\n return (\n i % 3 == 0\n )\n\ndef filter_pointcloud(pointcloud):\n return [(x,y,z,rgba) for (i,(x,y,z,rgba)) in enumerate(pointcloud) if dont_kill_it(i)]\n\ndef fix_header(header, new_nb_points):\n new_header = []\n for line in header:\n row = line.split()\n if row[0] in ['WIDTH', 'POINTS']:\n new_header.append('{} {}\\n'.format(row[0], new_nb_points))\n else:\n new_header.append(line)\n return new_header\n\ndef write_file(header, pointcloud, outfilename):\n with open(outfilename, 'w') as f:\n for line in header:\n f.write(line)\n for (x,y,z,rgba) in pointcloud:\n f.write(\"{} {} {} {}\\n\".format(x, y, z, rgba))\n\ndef main():\n (infilename, outfilename) = get_args()\n header, pointcloud = read_file(infilename)\n filtered_pointcloud = filter_pointcloud(pointcloud)\n header = fix_header(header, len(filtered_pointcloud))\n write_file(header, filtered_pointcloud, outfilename)\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts_python/thin_cloud.py","file_name":"thin_cloud.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561587769","text":"import configparser\n\nconfig = configparser.ConfigParser()\nfilename = '../config/config.ini'\nconfig.read(filename, encoding='utf-8')\nsections = config.sections()\nprint(sections)\nitems = config.items('svn')\nprint(items)\nvpn_path = config.get('svn', 'svn_path')\nprint(vpn_path)","sub_path":"wntest/configparser_test.py","file_name":"configparser_test.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204443131","text":"#!/usr/bin/env python3\nimport cv2\nimport sys\nimport os\nimport logging as log\nimport datetime as dt\nfrom time import sleep\n\ncascPath = \"haarcascade_frontalface_default.xml\"\nfaceCascade = cv2.CascadeClassifier(cascPath)\n\next=os.path.splitext(sys.argv[1])[1]\nif ext=='.jpg' or ext=='.gif' or ext=='.jpeg' or ext=='.png':\n image=cv2.imread(sys.argv[1],0)\n faces = faceCascade.detectMultiScale(\n image,\n scaleFactor=1.1,\n minNeighbors=5,\n 
minSize=(5, 5)\n )\n res=len(faces)\n print(res)\n exit(res)\n\nvideo_capture = cv2.VideoCapture(sys.argv[1])\nanterior = 0\n\nsum=0\ntotal_frames=0\n\nwhile True:\n if not video_capture.isOpened():\n print('Unable to load camera.')\n sleep(5)\n pass\n\n ret, frame = video_capture.read()\n total_frames+=1\n if total_frames%5!=0:\n continue\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=2,\n minSize=(5, 5)\n )\n\n sum+=len(faces)\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n if anterior != len(faces):\n anterior = len(faces)\n\n\n cv2.imshow('Video', frame)\n\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\nres=int((sum*5)/total_frames)\n\nprint(res)\n\n## release objects\nvideo_capture.release()\ncv2.destroyAllWindows()\n\nexit(res)\n","sub_path":"EventRegistration-Spring/countFaces.py","file_name":"countFaces.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12694308","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport asyncio\nimport orm\nfrom models import User, Blog, Comment, DeviceInfo, ExDeviceInfo\nimport random\nimport string\nimport struct, socket\nimport time\nimport datetime\n\nref = 18\n\nexDeviceInfo = ExDeviceInfo()\n\nexDeviceInfo.ref = ref\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(orm.get_pool(loop))\ndeviceInfo = loop.run_until_complete(DeviceInfo.findSpecCls('id',ref))\n\nrd = random.randint(1,3)\n\nsubscriberList = [random.choice(string.digits) for i in range(10)]\n\nif rd == 1:\n print('bestProvider: network')\n deviceInfo.bestProvider = 'network'\n\n deviceInfo.networkOperator = '46000'\n deviceInfo.networkOperatorName = 'CMCC'\n deviceInfo.simOperator = '46000'\n deviceInfo.simOperatorName = 'CMCC'\n\n deviceInfo.subscriberId = '46000' + ''.join(subscriberList)\n print(deviceInfo.subscriberId)\n\nelif rd == 2:\n print('bestProvider: passive')\n deviceInfo.bestProvider = 'passive'\n\n deviceInfo.networkOperator = '46001'\n deviceInfo.networkOperatorName = 'CHINA UNICOM'\n deviceInfo.simOperator = '46001'\n deviceInfo.simOperatorName = 'CHINA UNICOM'\n\n deviceInfo.subscriberId = '46001' + ''.join(subscriberList)\n print(deviceInfo.subscriberId)\n\nelse:\n print('bestProvider: gps')\n deviceInfo.bestProvider = 'gps'\n\n deviceInfo.networkOperator = '46003'\n deviceInfo.networkOperatorName = 'CHINA TELECOM'\n deviceInfo.simOperator = '46003'\n deviceInfo.simOperatorName = 'CHINA TELECOM'\n \n deviceInfo.subscriberId = '46003' + ''.join(subscriberList)\n print(deviceInfo.subscriberId)\n\n#deviceId\ndeviceList = [random.choice(string.digits) for i in range(15)]\ndeviceId = ''.join(deviceList)\nprint(deviceId)\ndeviceInfo.deviceId = deviceId\n\n#androidid\nandroidList = [random.choice(string.ascii_letters + string.digits) for i in range(16)]\nandroidid = ''.join(androidList)\nprint(androidid)\ndeviceInfo.androidid = androidid\n\n#networkType\ndeviceInfo.networkType = random.randint(1,15)\n\n#simSerialNumber\nsimSerialList = [random.choice(string.digits) for i in range(15)]\nsimSerialNumber = '8986' + ''.join(simSerialList)\nprint(simSerialNumber)\ndeviceInfo.simSerialNumber = simSerialNumber\n\n#getSerial\nrd = random.randint(10,20)\nserialList = [random.choice(string.ascii_letters + string.digits) for i in range(rd)]\ndeviceInfo.getSerial = ''.join(serialList)\nprint(deviceInfo.getSerial)\n\n\ndef randomAddress():\n 
addressList = []\n for i in range(1, 7):\n randomStr = ''.join(random.sample('0123456789abcdef', 2))\n addressList.append(randomStr)\n randomAddress = ':'.join(addressList)\n print(randomAddress)\n return randomAddress\n\nRANDOM_IP_POOL = ['192.168.10.220/0']\ndef get_random_ip():\n str_ip = RANDOM_IP_POOL[random.randint(0, len(RANDOM_IP_POOL) - 1)] \n str_ip_addr = str_ip.split('/')[0]\n str_ip_mark = str_ip.split('/')[1]\n ip_addr = struct.unpack('>I', socket.inet_aton(str_ip_addr))[0]\n mask = 0x0\n for i in range(31, 31-int(str_ip_mark), -1):\n mask = mask | (1 << i)\n ip_addr_min = ip_addr & (mask & 0xffffffff)\n ip_addr_max = ip_addr | (~mask & 0xffffffff)\n return socket.inet_ntoa(struct.pack('>I', random.randint(ip_addr_min, ip_addr_max)))\n\n\n\ndef get_time():\n a = '2010-01-01 10:00:00'\n time.strptime(a, '%Y-%m-%d %H:%M:%S')\n now = time.mktime(time.strptime(a,'%Y-%m-%d %H:%M:%S'))\n return now\n\ndef get_random_time():\n start = int(get_time())\n end = int(time.time())\n rd = random.randint(100,999)\n result = str(random.randint(start, end)) + str(rd)\n return result\n\n\n\n#macAddress\ndeviceInfo.macAddress = randomAddress()\n\n#bssid\ndeviceInfo.bssid = randomAddress()\n\n#ipAddress\ndeviceInfo.ipAddress = get_random_ip()\n\n#networkId\ndeviceInfo.networkId = random.randint(0, 9)\n\n#rssi\ndeviceInfo.rssi = random.randint(-100, -55)\nprint(deviceInfo.rssi)\n\n#phonetime\ndeviceInfo.phoneTime = get_random_time()\nprint(deviceInfo.phoneTime)\n\n\n#copy\nexDeviceInfo.idenf = deviceInfo.idenf\nexDeviceInfo.density = deviceInfo.density\nexDeviceInfo.dpi = deviceInfo.dpi\nexDeviceInfo.scaleDensity = deviceInfo.scaleDensity\nexDeviceInfo.bestProvider = deviceInfo.bestProvider\nexDeviceInfo.gclGetCid = deviceInfo.gclGetCid\nexDeviceInfo.gclGetLac = deviceInfo.gclGetLac\nexDeviceInfo.gclGetPsc = deviceInfo.gclGetPsc\nexDeviceInfo.cellLocation = deviceInfo.cellLocation\nexDeviceInfo.deviceId = deviceInfo.deviceId\nexDeviceInfo.androidid = deviceInfo.androidid\nexDeviceInfo.networkOperator = deviceInfo.networkOperator\nexDeviceInfo.networkOperatorName = deviceInfo.networkOperatorName\nexDeviceInfo.networkType = deviceInfo.networkType\nexDeviceInfo.simSerialNumber = deviceInfo.simSerialNumber\nexDeviceInfo.simOperator = deviceInfo.simOperator\nexDeviceInfo.simOperatorName = deviceInfo.simOperatorName\nexDeviceInfo.subscriberId = deviceInfo.subscriberId\nexDeviceInfo.getSerial = deviceInfo.getSerial\nexDeviceInfo.dataActivity = deviceInfo.dataActivity\nexDeviceInfo.board = deviceInfo.board\nexDeviceInfo.brand = deviceInfo.brand\nexDeviceInfo.bootloader = deviceInfo.bootloader\nexDeviceInfo.display = deviceInfo.display\nexDeviceInfo.device = deviceInfo.device\nexDeviceInfo.fingerPrint = deviceInfo.fingerPrint\nexDeviceInfo.hardwear = deviceInfo.hardwear\nexDeviceInfo.manufacturer = deviceInfo.manufacturer\nexDeviceInfo.model = deviceInfo.model\nexDeviceInfo.product = deviceInfo.product\nexDeviceInfo.relea = deviceInfo.relea\nexDeviceInfo.sdk = deviceInfo.sdk\nexDeviceInfo.sdkInt = deviceInfo.sdkInt\nexDeviceInfo.extraInfo = deviceInfo.extraInfo\nexDeviceInfo.reason = deviceInfo.reason\nexDeviceInfo.subType = deviceInfo.subType\nexDeviceInfo.subTypeName = deviceInfo.subTypeName\nexDeviceInfo.type = deviceInfo.type\nexDeviceInfo.typeName = deviceInfo.typeName\nexDeviceInfo.macAddress = deviceInfo.macAddress\nexDeviceInfo.bssid = deviceInfo.bssid\nexDeviceInfo.ipAddress = deviceInfo.ipAddress\nexDeviceInfo.networkId = deviceInfo.networkId\nexDeviceInfo.ssid = 
deviceInfo.ssid\nexDeviceInfo.rssi = deviceInfo.rssi\nexDeviceInfo.widthPixels = deviceInfo.widthPixels\nexDeviceInfo.heightPixels = deviceInfo.heightPixels\nexDeviceInfo.width = deviceInfo.width\nexDeviceInfo.height = deviceInfo.height\nexDeviceInfo.rotation = deviceInfo.rotation\nexDeviceInfo.version = deviceInfo.version\nexDeviceInfo.line1Number = deviceInfo.line1Number\nexDeviceInfo.tags = deviceInfo.tags\nexDeviceInfo.phoneTime = deviceInfo.phoneTime\nexDeviceInfo.phoneType = deviceInfo.phoneType\nexDeviceInfo.phoneUser = deviceInfo.phoneUser\nexDeviceInfo.host = deviceInfo.host\nexDeviceInfo.radioVersion = deviceInfo.radioVersion\nexDeviceInfo.codeName = deviceInfo.codeName\nexDeviceInfo.incremental = deviceInfo.incremental\nexDeviceInfo.buildID = deviceInfo.buildID\n\nloop.run_until_complete(exDeviceInfo.save())\n","sub_path":"www/copyExDev.py","file_name":"copyExDev.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67662778","text":"import subprocess\nimport Pyro4.core\n\nclass xfsController(object):\n def xfsfreeze(self, name):\n subprocess.run([\"xfs_freeze\", \"-f\", \"/hana/data\"])\n return \"File system of Data Volumes frozen....\"\n\n def xfsunfreeze(self, name):\n subprocess.run([\"xfs_freeze\", \"-u\", \"/hana/data\"])\n return \"File system of Data Volumes UNfrozen....\"\n\n\ndef main():\n Pyro4.config.SERIALIZERS_ACCEPTED.add('pickle')\n daemon=Pyro4.Daemon(\"10.21.39.45\")\n uri=daemon.register(xfsController)\n\n print (\"The object's uri is:\", uri)\n\n daemon.requestLoop()\n\nif __name__==\"__main__\":\n main()\n","sub_path":"Control_xfs_filesystem.py","file_name":"Control_xfs_filesystem.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"88581187","text":"\n\n# How I think I want the syntax to work\nimport pyobo as obo\n\no = obo(\"v1.2.1\") # New OBO with given format-version\no = obo(file) # Load OBO\n\n# Access Stanzas\no.header # Access the header\no[\"PO:0001\"] # Access Stanza with given ID \no.getTerms() # Returns an iterator of all Terms (with a __len__)\no.getTypdefs() # Returns an iterator of all Typdefs (with a __len__)\no.getInstances() # Returns an iterator of all Instances (with a __len__)\n\n# Create Stanzas\no.Term(\"PO:0001\") # Create new Term with given ID (returns ref to created obj)\no.Typdef(\"PO:0001\") # Create new Typedef with given ID (returns ref to created obj)\no.Instance(\"PO:0001\") # Create new Instance with given ID (returns ref to created obj)\no.add(Stanza, \"PO:0001\") # Create copy of Stanza with given ID\no[\"PO:0001\"] = Stanza # Create copy of Stanza with given ID\n\n# Delete Stanzas\ndel o[\"PO:0001\"]\no.remove(Stanza)\n\n# Access tag/value pairs (Cardinality 0|1)\no.header[\"format-version\"]\no[\"PO:0001\"][\"name\"]\nfor term in o.getTerms():\n print(term[\"id\"])\n\n# Access tag/value pairs (Cardinality n)\no.header[\"subsetdef\"][0]\nfor value in o.header[\"subsetdef\"]:\n print(value)\n \no[\"PO:0001\"][\"synonym\"][0]\nfor value in o[\"PO:0001\"][\"synonym\"]:\n print(value)\n \nfor term in o.getTerms():\n for value in term[\"xref\"]:\n print(value)\n\n# Delete tag/value pairs (Cardinality 0|1), required IDs cannot be deleted\ndel o.header[\"saved-by\"]\ndel o[\"PO:0001\"][\"name\"]\nfor term in o.getTerms():\n del term[\"namespace\"]\n\n# Delete tag/value pairs (Cardinality n)\ndel o.header[\"subsetdef\"][0]\nfor i in 
range(len(o.header[\"subsetdef\"])):\n del o.header[\"subsetdef\"][i]\n \ndel o[\"PO:0001\"][\"synonym\"][0]\nfor i in range(len(o[\"PO:0001\"][\"synonym\"])):\n del o[\"PO:0001\"][\"synonym\"][i]\n \nfor term in o.getTerms():\n for i in range(len(term[\"xref\"])):\n del term[\"xref\"][i]\n","sub_path":"pyobo/__wishes.py","file_name":"__wishes.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"389942138","text":"\"\"\"\nCreated on Sat Jul 28 17:49:30 2018\n\n@author: vmueller\n\"\"\"\n\n# Data Preprocessing\n# Recurrent Neural Network\n\n\n\n# Part 1 - Data Preprocessing\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom rnn_utility_functions import save_model, load_model\nfrom matplotlib.finance import candlestick_ohlc\nfrom matplotlib import style\nimport matplotlib.dates as mdates\nstyle.use('ggplot')\n\n# First Step is to Import Dataset\ndataset = pd.read_csv('A/A_Price.csv')\ndataset_train = dataset.iloc[3000:4300,:]\ndataset_test = dataset.iloc[4300:,:]\n\n\ndelta_days = 30\n# Importing the training set\ntraining_set = dataset_train.iloc[:, 5:6].values\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\ntraining_set_scaled = sc.fit_transform(training_set)\n\n# Creating a data structure with 60 timesteps and 1 output\nX_train = []\ny_train = []\n\nfor i in range(delta_days, 1300):\n X_train.append(training_set_scaled[i-delta_days:i, 0])\n y_train.append(training_set_scaled[i, 0])\nX_train, y_train = np.array(X_train), np.array(y_train)\n\n# Reshaping\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n\n\n\n# Part 2 - Building the RNN\n\n# Importing the Keras libraries and packages\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import Dropout\n\n# Initialising the RNN\nregressor = Sequential()\n\n# Adding the first LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True, input_shape = (X_train.shape[1], 1)))\nregressor.add(Dropout(0.2))\n\n# Adding a second LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a third LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50, return_sequences = True))\nregressor.add(Dropout(0.2))\n\n# Adding a fourth LSTM layer and some Dropout regularisation\nregressor.add(LSTM(units = 50))\nregressor.add(Dropout(0.2))\n\n# Adding the output layer\nregressor.add(Dense(units = 1))\n\n# Compiling the RNN\nregressor.compile(optimizer = 'adam', loss = 'mean_squared_error')\n\n# Fitting the RNN to the Training set\nregressor.fit(X_train, y_train, epochs = 100, batch_size = 32)\n\nsave_model(regressor)\n\nloaded_model = load_model()\n\n# Part 3 - Making the predictions and visualising the results\n\n# Getting the real stock price \nreal_stock_price = dataset_test.iloc[:, 5:6].values\n\n\n# Getting the predicted stock price of 2017\ndataset_total = pd.concat((dataset_train['Adj Close'], dataset_test['Adj Close']), axis = 0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - delta_days:].values\ninputs = inputs.reshape(-1,1)\ninputs = sc.transform(inputs)\nX_test = []\nfor i in range(delta_days, len(dataset_test) + delta_days):\n X_test.append(inputs[i-delta_days:i, 0])\nX_test = 
np.array(X_test)\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)\n\n\npredicted_stock_price_m = loaded_model.predict(X_test)\npredicted_stock_price_m = sc.inverse_transform(predicted_stock_price_m)\n\n\n# Visualising the results\nplt.plot(real_stock_price, color = 'red', label = 'Real Agile Technologies Stock Price')\nplt.plot(predicted_stock_price_m, color = 'blue', label = 'Predicted Agile Technologies Stock Price')\nplt.title('Agilent Technologies (A) Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Agile Technologies Stock Price')\nplt.legend()\nplt.show()","sub_path":"rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290617015","text":"import socket\nfrom PIL import Image\nimport re\n\ndef frozen(func):\n\tdef frozenWrapper(*args, **kwargs):\n\t\treturn lambda x: func(x, *args, **kwargs)\n\treturn frozenWrapper\n\nclass ValidationException(Exception):\n\tdef __init__(self, message):\n\t\tself.message = message\n\t\tsuper(ValidationException, self).__init__(message)\n\n@frozen\ndef ValidateInteger(x):\n\ttry:\n\t\tx = int(x)\n\texcept ValueError:\n\t\traise ValidationException('Not an integer.')\n\treturn x\n\n@frozen\ndef ValidateFloat(x):\n\ttry:\n\t\tx = float(x)\n\texcept ValueError:\n\t\traise ValidationException('Not a floating point value.')\n\treturn x\n\n@frozen\ndef ValidateImage(x):\n\ttry:\n\t\tImage.open(x)\n\texcept:\n\t\traise ValidationException('Invalid image.')\n\treturn x\n\n@frozen\ndef ValidateExtension(x, extensions):\n\tdef is_allowed(filename):\n\t\treturn '.' 
in filename and filename.rsplit('.', 1)[1] in extensions\n\tif not is_allowed(x.filename):\n\t\traise ValidationException('Invalid file extension.')\n\treturn x\n\n@frozen\ndef ValidateLength(x, minimum=None, maximum=None):\n\txlen = len(x)\n\tif minimum is not None:\n\t\tif xlen < minimum:\n\t\t\traise ValidationException('Length must be above {0}.'.format(minimum))\n\tif maximum is not None:\n\t\tif xlen > maximum:\n\t\t\traise ValidationException('Length must be below {0}.'.format(maximum))\n\treturn x\n\n@frozen\ndef ValidateCharset(x, charset):\n\tfor i, c in enumerate(x):\n\t\tif not c in charset:\n\t\t\traise ValidationException('Invalid character at position {0}: {1}'.format(i, c))\n\treturn x\n\nemail_regex = re.compile(r'[^@\\s-]+@[^@\\s-]+\\.[^@\\s-]+')\n\n@frozen\ndef ValidateEmail(x, validateDomain=False):\n\tif email_regex.match(x) is None:\n\t\traise ValidationException('Invalid email.')\n\tif validateDomain:\n\t\ttry:\n\t\t\tsocket.getaddrinfo(x.split('@')[1], None)  # port is a required positional argument; None resolves the host alone\n\t\texcept:\n\t\t\traise ValidationException('Invalid email.')\n\treturn x\n","sub_path":"zforms/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"83062979","text":"\"\"\"\nThis file contains all the basic implementations of functions for venous model.\n\"\"\"\n\nimport numpy as np\nfrom scipy.optimize import fsolve\nfrom scipy import integrate\n\nA0 = 7.1e-4\nK = 25*133.322\nrho = 1060\np_d = 15*133.322\nm=10\nn=-3/2\n\ndef boundaries(type,q,param):\n    if type==1:\n        #u_inflow\n        u_inflow = param\n        W1 = u_inflow + speed(q[0]) # Left-running characteristic\n        W2 = uu(q[:]) - speed(q[0]) # Right-running characteristic\n        func = lambda A : W1-W2-2*speed(A)\n        A = fsolve(func, q[0])\n        u = (W1+W2)/2\n    elif type==2:\n        #A_out\n        A_out = param\n        W1 = uu(q) + speed(q[0])\n        W2 = W1 - 2*speed(A_out)\n        func = lambda A : W1-W2-2*speed(A)\n        A = A_out#fsolve(func, A_out)\n        u = (W1+W2)*0.5\n    elif type==3:\n        #A_in\n        A_in = param\n        W2 = uu(q) - speed(q[0])\n        W1 = W2 + 2*speed(A_in)\n        func = lambda A : W1-W2-2*speed(A)\n        A = A_in #fsolve(func, A_out)\n        u = (W1+W2)*0.5\n    elif type==4:\n        #valves\n        Q = param\n        W1 = uu(q) + speed(q[0])\n        func = lambda A : W1 - Q/A - speed(A)\n        A = fsolve(func, q[0])\n        u = Q/A\n    elif type==5:\n        #valves\n        Q = param\n        W2 = uu(q) - speed(q[0])\n        func = lambda A : W2 - Q/A + speed(A)\n        A = fsolve(func, q[0])\n        u = Q/A\n    return [A,u]\n\n####\ndef p(q):\n    α = q[0]/A0\n    p = p_d + K*(α**m-α**n)\n    return p\n\ndef c(q):\n    α = q[0]/A0\n    c = np.sqrt(K/rho * (m*α**m - n*α**n))\n    return c\n\ndef uu(q):\n    u = q[1]\n    return u\n\ndef speed(A):\n    #speed = 4*c([A,0])\n    speed = integrate.quad(lambda a: c([a,0])/a, A0,A)[0]\n    return speed\n\n\ndef VenousExact(qL,qR,maxvel):\n    AL = qL[0,:]\n    AR = qR[0,:]\n    uL = qL[1,:]\n    uR = qR[1,:]\n    flux = np.empty_like(qL)\n    for i in range(len(AL)):\n        func = lambda A : fK(abs(A),AR[i],A0) + fK(abs(A),AL[i],A0)+uR[i]-uL[i]\n        A = abs(fsolve(func, (AR[i]+AL[i])*0.5)[0])\n        u = 0.5*(uL[i]+uR[i]+fK(A,AR[i],A0)-fK(A,AL[i],A0))\n        \n        flux[:,i] = EulerFlux([A,u])\n    return flux\ndef fK(A,AK,A0):\n    #m = 10\n    #n = -3/2\n    if A<=AK:\n        fK = integrate.quad(lambda a: c([a,0])/a, AK,A)[0]\n    else:\n        #BK = m/(m+1)*(A**(m+1)-AK**(m+1))/A0**m - n/(n+1)*(A**(n+1)-AK**(n+1))/A0**n\n        #fK = np.sqrt(K/rho*BK*(A-AK)/(A*AK))\n        fK = np.sqrt(2/rho * (p([A,0])-p([AK,0]))/(A**2-AK**2)) * (A-AK)\n    return fK\n\ndef EulerFlux(q):\n    \"\"\"Purpose: Compute flux for 1D Euler equations.\"\"\"\n    f1 = 
q[0]*q[1]\n f2 = 0.5*q[1]**2 + p(q)/rho\n flux = np.array((f1, f2))\n\n return flux\n\ndef EulerChar(q0):\n \"\"\"Purpose: Compute characteristic decomposition for Euler equations at q0\"\"\"\n #iS A S = Lam\n n = q0.shape[0]\n\n c0 = c(q0)\n A = q0[0]\n u0 = q0[1]\n S = np.zeros((n,n))\n iS = np.zeros((n,n))\n Lam = np.zeros((n,n))\n\n S[0,0] = 1\n S[0,1] = -1\n S[1,0] = c0/A\n S[1,1] = c0/A\n S /= c0/A+c0/A\n\n iS[0,0] = c0/A\n iS[0,1] = 1\n iS[1,0] = -c0/A\n iS[1,1] = 1\n\n Lam[0,0] = u0+c0\n Lam[1,1] = u0-c0\n\n return S,iS,Lam\n\n\n#######\ndef write2file(U,t,rawfile):\n file=open(rawfile,'a')\n np.savetxt(file,np.r_[t, U.ravel()][None],delimiter=',')\n file.close()\n\ndef EulerLF(u, v, maxvel):\n \"\"\"Purpose: Evaluate global Lax Friedrich numerical flux for \n the Euler equations\n\t\"\"\"\n fu = EulerFlux(u)\n fv = EulerFlux(v)\n \n flux = 0.5*(fu+fv)-0.5*maxvel*(v-u)\n return flux","sub_path":"venous.py","file_name":"venous.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644010638","text":"import os\nimport ujson as json\nimport tabulate\n\n\nclass Writer:\n def add(self, metrics):\n raise NotImplementedError()\n\n\nclass ConsoleWriter(Writer):\n\n def __init__(self):\n self.header = set()\n self.cache = []\n\n def add(self, metrics):\n for k in metrics.keys():\n self.header.add(k)\n self.cache.append(metrics)\n header = sorted(list(self.header))\n rows = []\n for m in self.cache:\n rows.append([m.get(k, None) for k in header])\n s = tabulate.tabulate(rows, headers=header)\n s = \"\\033[F\" * (len(s.splitlines()) - 1) + s\n print(s)\n\n\nclass FileWriter(Writer):\n\n def __init__(self, fname):\n self.fname = fname\n if os.path.isfile(fname):\n os.remove(fname)\n\n def add(self, metrics):\n with open(self.fname, 'a') as f:\n f.write('{}\\n'.format(json.dumps(metrics)))\n f.flush()\n\n\n\nif __name__ == '__main__':\n w = FileWriter('my_exp')\n import time\n for i in range(10):\n print(i)\n w.add({'iteration': i, 'score': i+1})\n time.sleep(1)\n","sub_path":"dashboard/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"95684267","text":"__author__ = 'Jiggy'\n\ndividend = int(input(\"Enter the dividend:\"))\ndivisor = int(input(\"Enter the divisor:\"))\ntempdivisor = divisor\nremainder = 0\n\ndef division (dividend, divisor):\n\n global remainder\n\n quotient = 1\n\n if divisor == dividend:\n remainder = 0\n return 1\n elif dividend < divisor:\n remainder = dividend\n return 0\n\n while divisor <= dividend:\n #Here divisor < dividend, therefore left shift (multiply by 2) divisor and quotient\n divisor = divisor << 1\n quotient = quotient << 1\n\n #We have reached the point where divisor > dividend, therefore divide divisor and quotient by two\n divisor = divisor >> 1\n quotient = quotient >> 1\n\n #Call division recursively for the difference to get the exact quotient\n print('Dividend {}, tempdivisor {} quotient {}'.format(dividend, tempdivisor, quotient))\n quotient = quotient + division(dividend - divisor, tempdivisor)\n\n return quotient\n\nprint(\"%s / %s: quotient = %s\" % (dividend, tempdivisor, division(dividend, divisor)))\nprint(\"%s / %s: remainder = %s\" % (dividend, tempdivisor, 
remainder))","sub_path":"TempDivisor.py","file_name":"TempDivisor.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"108453815","text":"\"\"\"Deal with argument parsing for the toolkit\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport re\nimport sys\nfrom glob import glob\nfrom typing import Any, Dict, List, Tuple\n\nfrom natsort import natsorted\n\nfrom . import global_vars\nfrom .get_students import get_students\nfrom .subcommands import do_ci, do_drive, do_record, do_repo_clean, do_repo_update, do_table, do_web\nfrom ..common import version\nfrom ..specs.spec_repos import format_supported_course_list\n\nASSIGNMENT_REGEX = re.compile(r'^(HW|LAB|WS)', re.IGNORECASE)\n\n\ndef build_argparser():\n \"\"\"Construct the argument list and parse the passed arguments\"\"\"\n parser = argparse.ArgumentParser(description='The core of the StoGrade toolkit')\n\n # Common arguments\n parser.add_argument('-v', '--version', action='store_true',\n help='Print the version of the toolkit and exit')\n\n base_options = argparse.ArgumentParser(description='common options')\n base_options.add_argument('--skip-version-check', '-V', action='store_true',\n default=os.getenv('STOGRADE_SKIP_VERSION_CHECK', False) is not False,\n help='Skips the pypi update check')\n base_options.add_argument('--skip-dependency-check', action='store_true',\n help='Skip checking for dependencies')\n base_options.add_argument('--skip-spec-update', '-S', action='store_true',\n help='Skip checking for spec updates')\n base_options.add_argument('--debug', action='store_true',\n help='Enable debugging mode (throw errors, implies -w1)')\n base_options.add_argument('--no-progress-bar', action='store_true',\n help='Hide the progress bar')\n base_options.add_argument('--workers', '-w', type=int, default=os.cpu_count(), metavar='N',\n help='The number of operations to perform in parallel')\n\n # Repository url modifiers\n repo_selection = argparse.ArgumentParser(add_help=False)\n repo_selection.add_argument('--course', default='', metavar='ID',\n help='Which course to evaluate '\n '(this sets a default stogit url and downloads the correct specs). '\n 'Can be {} or one of the previous with /f## or /s## (i.e. 
sd/s19)'\n .format(format_supported_course_list(delimiter=', ')))\n repo_selection.add_argument('--stogit', metavar='URL',\n help='Use an alternate stogit base URL (eg, git@stogit.cs.stolaf.edu:sd/s17)')\n\n # Recording options\n record_options = argparse.ArgumentParser(add_help=False)\n record_options.add_argument('--clean', action='store_true',\n help='Remove student folders and re-clone them')\n record_options.add_argument('--skip-repo-update', '-R', action='store_true',\n help='Do not update the student folders when checking')\n record_options.add_argument('--date', action='store', metavar='GIT_DATE',\n help=('Check out last submission on GIT_DATE (eg, \"last week\", \"tea time\", \"2 hrs ago\")'\n '(see `man git-rev-list`)'))\n record_options.add_argument('--format', action='store', choices=['md', 'html'], default='md',\n help='Set the output format')\n\n compile_options = argparse.ArgumentParser(add_help=False)\n compile_options.add_argument('--skip-web-compile', action='store_true',\n help='Skip compilation and testing of files marked with web: true')\n\n # Student selection\n student_selection = argparse.ArgumentParser(add_help=False)\n selection_args = student_selection.add_argument_group('student selection')\n selection_args.add_argument('--students', '--student', action='append', nargs='+', metavar='USERNAME', default=[],\n help='Only iterate over these students.')\n selection_args.add_argument('--section', action='append', dest='sections', nargs='+', metavar='SECTION', default=[],\n help='Only check these sections: my, all, a, b, etc.')\n\n # Table Printout Parent Parser\n table_options = argparse.ArgumentParser(add_help=False)\n table_options_args = table_options.add_argument_group('table printout options')\n table_options_args.add_argument('--sort', dest='sort_by', action='store', default='name', type=str,\n choices=['name', 'count'], help='Sort the students table')\n table_options_args.add_argument('--no-partials', '-P', action='store_true',\n help=\"Don't highlight partial submissions\")\n\n # SubParsers\n sub_parsers = parser.add_subparsers(dest='command')\n\n # CI SubParser\n parser_ci = sub_parsers.add_parser('ci', parents=[base_options, compile_options], conflict_handler='resolve',\n help=\"Check a single student's assignment as part of a CI job\")\n parser_ci.set_defaults(func=do_ci) # Set function to run from subcommands.py\n\n # Drive SubParser\n parser_drive = sub_parsers.add_parser('drive', parents=[base_options, student_selection],\n conflict_handler='resolve', help='Manage submissions via google drive')\n parser_drive.set_defaults(func=do_drive) # Set function to run from subcommands.py\n parser_drive.add_argument('assignments', nargs=1, metavar='HW',\n help='An assignment to process')\n parser_drive.add_argument('--email', '-e', required=True,\n help='Set the email of the group that documents are shared with '\n '(e.g. 
hd-tas@stolaf.edu)')\n\n    # Record SubParser\n    parser_record = sub_parsers.add_parser('record', help=\"Record students' work\",\n                                           parents=[base_options, record_options, compile_options,\n                                                    repo_selection, table_options, student_selection],\n                                           conflict_handler='resolve')\n    parser_record.set_defaults(func=do_record)  # Set function to run from subcommands.py\n    parser_record.add_argument('assignments', nargs='+', metavar='HW',\n                               help='An assignment to process')\n    parser_record.add_argument('--table', '-t', action='store_true',\n                               help='Show the overview table after recording is complete')\n    parser_record.add_argument('--gist', action='store_true',\n                               help='Post overview table and student recordings as a private gist')\n    parser_record.add_argument('--interact', action='store_true',\n                               help=\"Interact with each student's submission individually\")\n    parser_record.add_argument('--skip-branch-check', '-B', action='store_true',\n                               help='Do not check for unmerged branches')\n\n    # Repo SubParser\n    parser_repo = sub_parsers.add_parser('repo', help='Tools for cloning and updating student repositories',\n                                         conflict_handler='resolve')\n    repo_sub_parsers = parser_repo.add_subparsers()\n    repo_sub_parsers.add_parser('clean', aliases=['reclone'], help='Remove and reclone student repositories',\n                                parents=[base_options, repo_selection, student_selection],\n                                conflict_handler='resolve'\n                                ).set_defaults(func=do_repo_clean)  # Set function to run from subcommands.py\n    repo_sub_parsers.add_parser('update', aliases=['clone'], help='Clone and/or update student repos',\n                                parents=[base_options, repo_selection, student_selection],\n                                conflict_handler='resolve'\n                                ).set_defaults(func=do_repo_update)  # Set function to run from subcommands.py\n\n    # Table SubParser\n    parser_table = sub_parsers.add_parser('table', help='Print a table of the assignments submitted by students',\n                                          parents=[base_options, record_options, compile_options, repo_selection,\n                                                   table_options, student_selection],\n                                          conflict_handler='resolve')\n    parser_table.set_defaults(func=do_table)  # Set function to run from subcommands.py\n\n    # Web SubParser\n    parser_web = sub_parsers.add_parser('web', help='Run the CLI for grading React App files',\n                                        parents=[base_options, record_options, compile_options,\n                                                 repo_selection, student_selection],\n                                        conflict_handler='resolve')\n    parser_web.set_defaults(func=do_web)  # Set function to run from subcommands.py\n    parser_web.add_argument('assignments', nargs=1, metavar='HW',\n                            help='An assignment to process')\n    parser_web.add_argument('--port', type=int, required=True,\n                            help='Set the port for the server to use')\n\n    return parser\n\n\ndef get_ci_assignments() -> List[str]:\n    \"\"\"Find assignments in the student's repository during a CI job\"\"\"\n    all_assignments: List[str] = []\n    dirs = glob('hw*') + glob('lab*') + glob('ws*')\n    for line in dirs:\n        all_assignments.append(line.split('/')[-1])\n    return natsorted(set(all_assignments))\n\n\ndef process_args() -> Tuple[Dict[str, Any], List[str], List[str]]:\n    \"\"\"Process the arguments and create usable data from them\"\"\"\n    parser = build_argparser()\n    args = vars(parser.parse_args())\n\n    if args['version']:\n        print('version', version)\n        sys.exit(0)\n\n    global_vars.DEBUG = args.get('debug', False)\n    logging.basicConfig(level=logging.DEBUG if global_vars.DEBUG else logging.WARNING)\n\n    command: str = args['command']\n\n    # Prepare assignments and students for each SubCommand, along with other necessary variables\n    # Note that the SubCommand is not run here, that is done by main() (which 
called this)\n\n # ci SubCommand\n if command == 'ci':\n assignments = get_ci_assignments()\n students = [os.environ['CI_PROJECT_NAME']]\n args['course'] = os.environ['CI_PROJECT_NAMESPACE']\n global_vars.CI = True\n\n elif command == 'drive':\n assignments = natsorted(set(args['assignments']))\n students = get_students(args)\n args['course'] = ''\n\n # record SubCommand\n elif command == 'record':\n assignments = natsorted(set(args['assignments'])) # Has at least one assignment (enforced by argparser)\n students = get_students(args)\n\n # repo SubCommand\n elif command == 'repo':\n assignments = []\n students = get_students(args)\n\n # table SubCommand\n elif command == 'table':\n assignments = [] # Will be filled in later once we know that the data/specs directory exists\n students = get_students(args)\n\n # web SubCommand\n elif command == 'web':\n assignments = natsorted(set(args['assignments'])) # Has only one assignment (enforced by argparser)\n students = get_students(args)\n\n else:\n print('Sub-command must be specified', file=sys.stderr)\n sys.exit(1)\n\n if not students:\n print('No students selected', file=sys.stderr)\n print('Is your students.txt missing?', file=sys.stderr)\n sys.exit(1)\n\n debug_print_args(args)\n debug_print_students(students)\n debug_print_assignments(assignments)\n\n return args, students, assignments\n\n\ndef debug_print_args(args: Dict[str, Any]):\n logging.debug(\"Command Line Arguments:\")\n for arg, value in args.items():\n logging.debug(\"{}: {}\".format(arg, str(value)))\n\n\ndef debug_print_students(students: List[str]):\n logging.debug(\"Students:\")\n debug_print_grid(students)\n\n\ndef debug_print_assignments(assignments: List[str]):\n logging.debug(\"Assignments:\")\n debug_print_grid(assignments)\n\n\ndef debug_print_grid(items: List[str]):\n line = \"\"\n for i, item in enumerate(items):\n line += item.ljust(10)\n if i % 5 == 4 or i == len(items) - 1:\n logging.debug(line)\n line = \"\"\n","sub_path":"stograde/toolkit/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":12424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"506032546","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 10 10:34:06 2018\n\n@author: lukas\n\"\"\"\n\nimport os\nwd = os.getcwd()\n\nimport tensorflow as tf\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list=\"2\"\ntf.keras.backend.set_session(tf.Session(config=config))\n\n################### parameters // replace with config files ########################\n\ndataset = 'MSKCC'\n\n############################## Load dataset #############################\n \nTPM_channel = ''\n\nsegmentChannels = ['/CV_folds/CV_alignedNii-Aug2019_actual-F4-training/test_t1post_1.txt',\n\t\t '/CV_folds/CV_alignedNii-Aug2019_actual-F4-training/test_sub_1.txt']\nsegmentLabels = ''\n\noutput_classes = 2\n \n#-------------------------------------------------------------------------------------------------------------\n\n# Parameters \n\n######################################### MODEL PARAMETERS\n# Models : 'CNN_TPM' , 'DeepMedic'\n\nmodel = 'MultiPriors_v2' \n# Rules for patch sizes:\n# Anything that is divisible by 3 (on positions 2 and 3). 
They have to be > 12, > 66, > 66, respectively\ndpatch=[13,75,75]\nsegmentation_dpatch = [25,99,99] \nmodel_patch_reduction = [12,66,66] #for normal model.\nmodel_crop = 40 #for normal model.\n\npath_to_model = '/home/deeperthought/Projects/MultiPriors_MSKCC/training_sessions/MultiPriors_v2_MSKCC_configFile_MultiPriors_v2_F4_2019-12-08_1704/models/v2_MSKCC_configFile_MultiPriors_v2_F4_2019-12-08_1704.log_epoch28.h5'\nsession = path_to_model.split('/')[-3]\n\n########################################### TEST PARAMETERS\nquick_segmentation = True\noutput_probability = True \ndice_compare = False\nsave_as_nifti = True \npercentile_normalization = True\nuse_coordinates = True\nfull_segmentation_patches = True\ntest_subjects = 278\nn_full_segmentations = 278\nlist_subjects_fullSegmentation = range(278)\nsize_test_minibatches = 32\nsaveSegmentation = True\n\nimport numpy as np\npenalty_MATRIX = np.array([[ 1, 0],\n\t\t\t   [ 0, 1]], dtype='float32')\n\ncomments = ''\n\n","sub_path":"configFiles/segmentation/Segmentation_MultiPriors_v2.py","file_name":"Segmentation_MultiPriors_v2.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"448761236","text":"\"\"\"\n    Email Module\n    _______________\n    responsible for sending email via SendGrid\n\"\"\"\nimport os\nimport base64\n\n# send grid\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import (\n    Mail,\n    Attachment,\n    FileContent,\n    FileName,\n    FileType,\n    Disposition,\n    ContentId,\n)\n\nfrom rpc.lib.exceptions import BaseError\nfrom rpc.const import EMAIL_STATIC, EMAIL_TEMPLATES\n\nAPI_KEY = os.environ.get(\"EMAIL_API_KEY\")\n\n\nclass EmailError(BaseError):\n    \"\"\" raised when something went wrong while sending email \"\"\"\n\n\ndef create_attachment(filename):\n    \"\"\" create sendgrid attachment \"\"\"\n    with open(filename, \"rb\") as f:\n        data = f.read()\n\n    # split filename only (rsplit keeps just the last path component)\n    folder, filename = filename.rsplit(\"/\", 1)\n\n    encoded = base64.b64encode(data).decode()\n    attachment = Attachment()\n    attachment.file_content = FileContent(encoded)\n    attachment.file_type = FileType(\n        \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n    )\n    attachment.file_name = FileName(filename)\n    attachment.disposition = Disposition(\"attachment\")\n    attachment.content_id = ContentId(\"Example Content ID\")\n    return attachment\n\n\ndef prepare_email(sender, to, subject, html_template, attachment=None):\n    \"\"\" prepare sendgrid email \"\"\"\n    message = Mail(\n        from_email=sender, to_emails=to, subject=subject, html_content=html_template\n    )\n\n    if attachment is not None:\n        message.attachment = attachment\n\n    sg = SendGridAPIClient(API_KEY)\n    return sg, message\n\n\ndef execute(recipients, product_type, email_type, html_template, filename=None):\n    \"\"\" send email through sendgrid \"\"\"\n    # convert filename to attachment\n    attachment = None\n    if filename is not None:\n        attachment = create_attachment(filename)\n\n    # based on product get right subject and sender!\n    sender = EMAIL_STATIC[product_type][\"FROM\"]\n    subject = EMAIL_TEMPLATES[product_type][\"SUBJECT\"][email_type]\n\n    sg, mail = prepare_email(sender, recipients, subject, html_template, attachment)\n\n    try:\n        response = sg.send(mail)\n    except Exception as e:\n        raise EmailError(\"SENDING_FAILED\", e)\n    else:\n        print(response.status_code)\n        return 
response\n","sub_path":"notification-services/rpc/lib/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"596081831","text":"import sys\nfrom . import scrapers\nfrom . import cli\nfrom .database import Database\nfrom .nlp.spacy import analyse\nfrom .utils import install_spacy_required_packages, run_streamlit_dashboard\n\n__author__ = \"@jordanparker6\"\n\ndef main():\n cli.log(\"Newsreader CLI\", style=\"#00b3ff\", figlet=True)\n cli.log(\"Welcome to Newsreader CLI\", style=\"#07b05b bold\")\n database = cli.askDatabaseInfo()\n db = Database(database[\"uri\"])\n methods = cli.askMethodInfo()[\"methods\"]\n cfg = cli.askQuestions(methods)\n\n # 1) Run Scrapers\n if \"scrapers\" in methods:\n selected_scrapers = cfg[\"scrapers\"][\"classes\"]\n del cfg[\"scrapers\"][\"classes\"]\n config = scrapers.ScraperConfig(**cfg[\"scrapers\"], headless=True)\n for Scraper in scrapers.ScraperBase.__subclasses__():\n if Scraper.__name__ in selected_scrapers:\n scraper = Scraper(db, config)\n scraper.start()\n\n # 2) Run NLP Pipeline\n if \"nlp\" in methods:\n model = cfg[\"nlp\"][\"model\"]\n install_spacy_required_packages(model)\n analyse(database[\"uri\"], model)\n\n # 3) Serve Dashboard\n if \"dashboard\" in methods:\n cli.log(\"Serving your dashboard...\", style=\"#07b05b bold\")\n run_streamlit_dashboard(database[\"uri\"])\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"newsreader/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"313848243","text":"'''\nCreated on Sep 29, 2014\n\n@author: dennis\n'''\nimport unittest\n\n#from tokens_app.get_instance import app\nfrom config import config\nfrom tokens_app.tokens import Tokens\n\nsilence_all = False\ntest_name = 'test_fetch_last_change_index'\n\nclass TokensTest(unittest.TestCase):\n\tdef setUp(self):\n\t\t#app.config['TESTING'] = True\n\t\t#self.app = app.test_client()\n\t\tself.tokens = Tokens()\n\n\tdef tearDown(self):\n\t\t#os.close(self.db_fd)\n\t\t#os.unlink(buildings.app.config['DATABASE'])\n\t\tpass\n\t@unittest.skipIf(test_name != 'test_notify' and silence_all, '')\n\tdef test_notify(self):\n\t\t#rv = self.app.get('/inf/v1/tokens/0')\n\t\t#self.assertTrue(rv.status_code == 200)\n\t\tpass\n\n\t@unittest.skipIf(test_name != 'test_fetch_last_change_index' and silence_all, '')\n\tdef test_fetch_last_change_index(self):\n\t\tlast_change_index = self.tokens.fetch_last_change_index()\n\t\t\n\t\tself.assertTrue(last_change_index > 0)\n\t\n\t@unittest.skipIf(test_name != 'test_fetch_updates' and silence_all, '')\n\tdef test_fetch_updates(self):\n\t\tupdates = self.tokens.fetch_updates(0)\n\t\t\n\t\tself.assertTrue(len(updates) > 0)\n\t\tself.assertTrue('user' in updates[0])\n\t\tself.assertTrue('token' in updates[0])\n\t\tself.assertTrue('update' in updates[0])\n\t\tself.assertTrue('action' in updates[0])\n\t\tself.assertTrue(len(updates[0]) == 4)\n\t\t\n\t@unittest.skipIf(test_name != 'test_fetch_tokens' and silence_all, '')\n\tdef test_fetch_tokens(self):\n\t\ttokens = self.tokens.fetch_tokens()\n\t\tself.assertTrue(len(tokens) > 0)\n\t\tself.assertTrue('user' in tokens[0])\n\t\tself.assertTrue('token' in tokens[0])\n\t\tself.assertTrue('update' in tokens[0])\n\t\tself.assertTrue(len(tokens[0]) == 3)\n\t\t\n\t@unittest.skipIf(test_name != 'test_fetch_scope' and silence_all, 
'')\n\tdef test_fetch_scope(self):\n\t\tscope_name = '/people/hold/advise'\n\t\tscope = self.tokens.fetch_scope(scope_name)\n\t\tself.assertTrue(len(scope) > 0)\n\t\tself.assertTrue('user' in scope[0])\n\t\tself.assertTrue(len(scope[0]) == 1)\n\t\t\n\t@unittest.skipIf(test_name != 'test_fetch_scope_names' and silence_all, '')\n\tdef test_fetch_scope_names(self):\n\t\tscope_name = '/people/hold/advise'\n\t\tscopes = self.tokens.fetch_scope_names()\n\t\tself.assertTrue(len(scopes) > 3)\n\t\tself.assertTrue(scope_name in scopes)\n\t\t\n\t@unittest.skipIf(test_name != 'test_calc_hmac' and silence_all, '')\n\tdef test_calc_hmac(self):\n\t\tdatetime_str = '2015-05-27T23:57:15Z'\n\t\t(dt, hmac) = self.tokens.calc_hmac(datetime_str=datetime_str)\n\t\tself.assertTrue(hmac == 'I_-4JvOeDIQl4ifHUl1816EobOc3zVYCvLSQohN3G8k=')\n\t\tprint( 'dt: ' + dt + ', hmac: ' + hmac)\n\t\t\nif __name__ == \"__main__\":\n\tunittest.main()","sub_path":"tests/test_tokens.py","file_name":"test_tokens.py","file_ext":"py","file_size_in_byte":2547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566088575","text":"import pickle\nimport numpy as np\nfrom IPython.core.debugger import set_trace\nimport mne\nimport matplotlib.pyplot as plt\nfrom scipy.stats import sem\nfrom mne.stats import permutation_cluster_1samp_test\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom scipy import stats as stats\n\ndef pickle_save(save_name, data):\n with open(save_name, 'wb') as f:\n pickle.dump(data, f)\n\ndef pickle_load(load_name):\n with open(load_name, 'rb') as f:\n out = pickle.load(f)\n return out\n\ndef epochs_hilbert(epochs, frequencies, crop):\n #from https://github.com/mne-tools/mne-python/issues/4869\n \"\"\"\n Hilbert transformation on epochs data\n\n Parameters\n ----------\n frequencies : tuple (Names, freq_min, freq_max)\n Name and values of the frequency band to explore.\n crop : tuple (tmin, tmax)\n New crop to apply after the Hilbert transform.\n baseline: int\n Baseline to apply after the Hilbert transform.\n\n Returns\n -------\n epochs : instance of Epochs\n The epochs object with transformed data.\n \"\"\"\n from scipy.signal import hilbert\n import mne\n import numpy as np\n band, fmin, fmax = frequencies\n\n epochs.filter(fmin, fmax, n_jobs=1,\n l_trans_bandwidth=1,\n h_trans_bandwidth=1,\n fir_design='firwin')\n\n\n\n # Hilbert transformation\n #data_h = hilbert(epochs._data)\n\n # Preserve mne-epochs format\n epochs._data = hilbert(epochs._data)\n\n # Crop the new epochs\n #epochs.crop(tmin = crop[0], tmax = crop[1])\n\n # Aplly baseline\n #epochs.apply_baseline(baseline=baseline)\n\n # Remove evoked response and get analytic signal (envelope)\n epochs.subtract_evoked()\n data_h = epochs._data\n\n epochs_h = mne.EpochsArray(data=np.abs(data_h),\n info=epochs.info,\n tmin=epochs.tmin,\n events=epochs.events,\n event_id= epochs.event_id)\n epochs_h.crop(tmin = crop[0], tmax = crop[1])\n return epochs_h\n\n\ndef plot_hilbert(frequency_map, baseline, condition):\n \"\"\"\n Plot results from Hilbert transformation\n\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n from mne.baseline import rescale\n from mne.stats import _bootstrap_ci\n\n fig, axes = plt.subplots(len(frequency_map), 1, figsize=(10, 7), sharex=True, sharey=True)\n colors = plt.get_cmap('winter_r')(np.linspace(0, 1, 5))\n\n for ((freq_name, fmin, fmax), epoch), color, ax in zip(\n frequency_map, colors, axes.ravel()[::-1]):\n #set_trace()\n average = 
epoch[condition].average()\n times = average.times\n # Compute GFP\n gfp = np.sum(average.data ** 2, axis=0)\n gfp = mne.baseline.rescale(gfp, times, baseline=baseline)\n\n ax.plot(times, gfp, label=freq_name, color=color, linewidth=2.5)\n ax.axhline(0, linestyle='--', color='grey', linewidth=2)\n\n # Compute CI\n ci_low, ci_up = _bootstrap_ci(average.data, random_state=0,\n stat_fun=lambda x: np.sum(x ** 2, axis=0))\n\n ci_low = rescale(ci_low, average.times, baseline=baseline)\n ci_up = rescale(ci_up, average.times, baseline=baseline)\n\n ax.fill_between(times, gfp + ci_up, gfp - ci_low, color=color, alpha=0.3)\n ax.grid(True)\n ax.set_ylabel('GFP')\n ax.annotate('%s (%d-%dHz)' % (freq_name, fmin, fmax),\n xy=(0.95, 0.8),\n horizontalalignment='right',\n xycoords='axes fraction')\n ax.set_xlim(epoch.tmin, epoch.tmax)\n\n axes.ravel()[-1].set_xlabel('Time [ms]')\n\n'''\ndef combine_events(y, dict1, dict2, ev_mapping):\n # For wake data; hierachical events_id\n from IPython.core.debugger import set_trace\n #from config import ev_mapping\n #possible keys: familiar, unfamiliar, own, un\n new_mapping = {**dict1, **dict2}\n #assert new values differ from the existing ones\n assert all([v not in ev_mapping.values() for v in new_mapping.values()])\n #assert new keys are present in the existing ones\n assert all([k in ''.join(list(ev_mapping.keys())) for k in new_mapping.keys()] )\n new_y = np.zeros(len(y), dtype=int)\n ev_mapping_rev = {v:k for k,v in ev_mapping.items()} #reverse k,v mapping\n for i in range(len(new_y)):\n old_k = ev_mapping_rev[y[i]]\n\n if ('familiar'or 'unfamiliar') in [*new_mapping.keys()]:\n split_idx = 0\n\n elif ('own' or 'un') in [*new_mapping.keys()]:\n split_idx = 1\n\n new_y[i] = new_mapping[old_k.split('/')[split_idx]]\n return new_y\n'''\n\ndef combine_events_v2(y, mapp1, mapp2, ev_mapping):\n newy= np.zeros(len(y))\n new_mapp = {**mapp1, **mapp2}\n\n def convert_slashed_mapping(d):\n # Convert dict values (slashed string => list of strings)\n reversed = {v: k for k, v in d.items()}\n return {k : [*v.split('/')] for k,v in reversed.items()}\n\n old_mapp = convert_slashed_mapping(ev_mapping)\n new_mapp = convert_slashed_mapping(new_mapp)\n\n for i, _ in enumerate(y): # Loop over y numeric label\n this = old_mapp[y[i]] # Get list of events for given y\n for k, v in new_mapp.items(): # Loop over new mapping, get key where all values match\n match = all( v[idx] in this for idx in range(len(v)) )\n if match:\n newy[i] = k\n return newy\n\n#def balance_classes(X,y):\n# from imblearn.under_sampling import RandomUnderSampler\n# rus = RandomUnderSampler(random_state=0)\n# if X.ndim > 2:\n# d1, d2, d3, d4 = X.shape\n# X = X.reshape((d1,-1))\n# X, y = rus.fit_sample(X, y)\n# return (X.reshape([-1,d2,d3,d4]), y)\n\ndef balance_classes_v2(y):\n from imblearn.under_sampling import RandomUnderSampler\n rus = RandomUnderSampler(random_state=0)\n no_epochs = len(y)\n indices = np.arange(no_epochs).reshape([-1,1])\n _, _ = rus.fit_resample(indices, y) #return only indices, no resampling\n return rus.sample_indices_\n\n\ndef plot_time_cross(data, index='slide_time', start=-0.2, stop=1., ax=None):\n if ax is None:\n fig, ax = plt.subplots()\n\n for (this_data, data_type, col, pval_offset) in zip(data, ['wake_scores', 'sleep_scores'],\n ['blue', 'green'], [0, 0.01]):\n\n data_arr = np.asarray([this_data.loc[index][sbj] for sbj in range(17)])\n\n if data_arr.ndim > 2: #multiscore\n data_arr = data_arr.mean(1) #mean folds\n #mean_arr = data_arr[1:].mean(0) #mean sbjs, NOTE skip sbj1\n 
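# data_arr is now (n_subjects, n_times): folds were averaged above and subjects are averaged next;\n        # the permutation cluster test further down compares these accuracies against chance (0.5)\n        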
mean_arr = data_arr.mean(0)\n\n sem_arr = sem(data_arr, axis=0)\n n_time = len(mean_arr)\n times = np.linspace(start, stop, n_time)\n ax.plot(times, mean_arr, label=data_type, c=col)\n ax.fill_between(times, mean_arr - sem_arr, mean_arr + sem_arr,\n color='black', alpha=0.2, edgecolor='none')\n #statistics\n X = data_arr-0.5\n p_threshold = 0.05\n n_subjects = len(X)\n t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects)\n\n T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(X, threshold=t_threshold,\n seed=12)\n print(cluster_p_values)\n for i_c, c in enumerate(clusters):\n c = c[0]\n if cluster_p_values[i_c] <= 0.05:\n h = ax.axvspan(times[c.start], times[c.stop - 1], ymin=0.1+pval_offset,\n ymax=0.12+pval_offset, color=col, alpha=0.3)\n ax.axhline(.5, color='r', linestyle='--', label='chance')\n ax.axvline(0., color='black', linestyle='--')\n ax.set_xlabel('Time [s]')\n ax.set_ylabel('Accuracy [%]')\n ax.set_xlim(xmin=times[0], xmax=times[-1])\n ax.set_title(index.upper())\n ax.legend()\n return\n\n\ndef my_combine_events(epochs, new_events, old_event_ids):\n new_ids = [111, 222]\n new_mapping = [{new_events[i] : new_ids[i]} for i in range(2)] #new events to ids dictionary\n\n for i in range(len(old_event_ids)):\n mne.epochs.combine_event_ids(epochs, old_event_ids=old_event_ids[i], new_event_id=new_mapping[i], copy=False)\n return epochs\n\ndef my_equalize_events(epochs, new_events):\n from mne.epochs import equalize_epoch_counts \n from mne import concatenate_epochs\n e1_ = epochs[new_events[0]]\n e2_ = epochs[new_events[1]]\n equalize_epoch_counts([e1_, e2_]) #balance classes\n return concatenate_epochs([e1_, e2_])\n\ndef my_resampler(y, X, indices):\n from sklearn.preprocessing import LabelBinarizer \n from sklearn.utils import shuffle\n X, y = shuffle(X, y)\n sampled_y = []\n sampled_X = []\n new_id_v = np.unique(y) \n for i in range(len(new_id_v)):\n sampled_y.append(y[y==new_id_v[i]][indices])\n sampled_X.append(X[y==new_id_v[i]][indices, ...])\n sampled_y = np.hstack(sampled_y)\n sampled_X = np.vstack(sampled_X)\n print('prior sampling count: {}'.format(len(y)))\n print('after sampling count: {}'.format(len(sampled_y)))\n assert(len(sampled_y) == sampled_X.shape[0])\n sampled_y = LabelBinarizer().fit_transform(sampled_y).squeeze()\n return sampled_y, sampled_X\n\ndef my_resampler_v2(y, X, tosample):\n from sklearn.preprocessing import LabelBinarizer \n from sklearn.utils import shuffle\n X, y = shuffle(X, y)\n\n sampled_y = []\n sampled_X = []\n new_id_v = np.unique(y) \n count0 = len(y[y==0]) #count for class 1\n count1 = len(y[y==1]) #count for class 2\n indices0 = np.random.permutation(np.arange(count0))[:tosample]\n indices1 = np.random.permutation(np.arange(count1))[:tosample]\n for i in range(len(new_id_v)):\n if i == 0:\n indices = indices0\n else:\n indices = indices1\n sampled_y.append(y[y==new_id_v[i]][indices])\n #set_trace()\n sampled_X.append(X[y==new_id_v[i]][indices, ...])\n sampled_y = np.hstack(sampled_y)\n sampled_X = np.vstack(sampled_X)\n print('prior sampling count: {}'.format(len(y)))\n print('after sampling count: {}'.format(len(sampled_y)))\n assert(len(sampled_y) == sampled_X.shape[0])\n sampled_y = LabelBinarizer().fit_transform(sampled_y).squeeze()\n return sampled_y, sampled_X","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"15419841","text":"import pytest\nfrom hamcrest 
import *\nimport json\n\nfrom Convertor.framework.convertor import Convertor\n\nclass TestIntegratedConsoleParser:\n    def setup_method(self, method):\n        self.__input_sample = 'samples/input.json'\n        self.__output_sample = 'samples/output.json'\n\n    def test_ReadInputAndOutputSamplesFromFileWhenCallParseItShouldReturnCorrectOutput(self):\n        with open(self.__input_sample, 'r') as input_sample:\n            input_data = json.load(input_sample)\n        \n        with open(self.__output_sample, 'r') as output_sample:\n            output_data = json.load(output_sample)\n        \n        convertor = Convertor()\n        args = [\"currency\", \"country\", \"city\"]\n        result = convertor.convert(json.dumps(input_data), args)\n        assert_that(result, equal_to(output_data))\n","sub_path":"Convertor/tests/integration/test_integrated_convertor.py","file_name":"test_integrated_convertor.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"614711610","text":"#script that builds a statistical summary of the data set...\n\nimport sys\nimport pandas as pd\nimport numpy as np\n\n#read the input data...\ndataSet = pd.read_csv(sys.argv[1])\npathOutput = sys.argv[2]\n\n#lists of statistics\nmeanData = []\nstdData = []\nvarData = []\nminData = []\nmaxData = []\n\nkeyData = []\nfor key in dataSet:\n    if key not in ['ID', 'Sex']:\n        keyData.append(key)\n        meanData.append(np.mean(dataSet[:][key]))\n        stdData.append(np.std(dataSet[:][key]))\n        varData.append(np.var(dataSet[:][key]))\n        minData.append(min(dataSet[:][key]))\n        maxData.append(max(dataSet[:][key]))\n\n#assemble the matrix with the statistics...\nmatrixData = [meanData, stdData, varData, minData, maxData]\n\n#create the data frame...\ndataFrame = pd.DataFrame(matrixData, columns=keyData, index=['Mean', 'Std', 'Variance', 'Min', 'Max'])\ndataFrame.to_csv(pathOutput+\"statisticSummary.csv\")\n","sub_path":"scripts/statisticSummary.py","file_name":"statisticSummary.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"336730758","text":"import os\nimport re\nimport glob\nfrom cgl.core.utils.read_write import load_json, save_json\n\n\ndef get_user_config_file():\n    \"\"\"\n    returns the location of the user config file.\n    :return:\n    \"\"\"\n\n    environment_variable = os.getenv('cgl_user_globals')\n\n    if not environment_variable:\n        try:\n            path = \"~\\\\Documents\\\\cglumberjack\\\\user_globals.json\"\n            file = os.path.expanduser(path)\n            print(file)\n            if os.path.isfile(file):\n                print('getting path {}'.format(path))\n                return file\n        except:\n            print('no user globals found')\n            raise\n\n    return environment_variable\n\n\ndef user_config():\n    \"\"\"\n    returns dictionary representing user config.\n    :return:\n    \"\"\"\n    user_config_file = get_user_config_file()\n\n    if os.path.isfile(user_config_file):\n        config = load_json(user_config_file)\n        return config\n    else:\n        print('error finding user globals file in: {}'.format(user_config_file))\n\n\ndef get_sync_config_file():\n    \"\"\"\n    gets the location of the sync config file.\n    :return:\n    \"\"\"\n    filepath = ProjectConfig().sync_config_file\n    if os.path.exists(filepath):\n        print('Sync Exists {}'.format(filepath))\n        return filepath\n    else:\n        print('Sync config does not exist: {}'.format(filepath))\n        return None\n\n\ndef paths():\n    \"\"\"\n    returns paths to all the software used in the cookbook.\n    :return:\n    \"\"\"\n    paths = user_config()['paths']\n    if paths is not None:\n
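        # illustrative structure only: user_globals.json is expected to look something like\n        # {\"paths\": {\"root\": \"...\", \"code_root\": \"...\"}, \"default_company\": \"...\"}\n        # ('root' and 'code_root' are the keys this module reads elsewhere)\n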
        return paths\n\n    else:\n        print('user config paths does not exist in {}'.format(get_user_config_file()))\n        return None\n\n\n\nclass ProjectConfig(object):\n    user_config_file = get_user_config_file()\n    user_config = user_config()\n    default_company = user_config['default_company']\n    project_config = {}\n    shaders_config = {}\n    globals_root = None\n    master_globals_root = None\n    default_files_folder = None\n    hdri_folder = None\n    cookbook_folder = None\n    css_folder = None\n    root_folder = None\n    hdri_settings_file = None\n    project_config_file = None\n    shaders_config_file = None\n    images_folder = None\n    project_management = None\n    sync_config_file = None\n\n    def __init__(self, path_object=None, company='master', project='master', print_cfg=False):\n        self.print_cfg = print_cfg\n        self.paths = paths()\n        if path_object:\n            self.company = path_object.company\n            self.project = path_object.project\n        else:\n            self.company = company\n            self.project = project\n        if self.print_cfg:\n            print('--------------------------------')\n            print('Loading Config for {}: {}'.format(self.company, self.project))\n        self.set_globals_path()\n        self.get_project_config()\n        self.images_folder = os.path.join(self.paths['code_root'], 'resources', 'images')\n        self.app_font_folder = os.path.join(self.paths['code_root'], 'resources', 'fonts')\n        if self.project_config:\n            self.project_management = self.project_config['account_info']['project_management']\n\n    def set_globals_path(self):\n        try:\n            self.root_folder = self.user_config['paths']['root']\n        except KeyError:\n            print('No root path found in user config file')\n            return\n        try:\n            self.default_company = self.user_config['default_company']\n        except KeyError:\n            print('No Default company found in user config file')\n            return\n        # set self.master_globals_root\n        master_globals = os.path.join(self.root_folder, 'master', 'config', 'master', 'globals.json')\n        if not os.path.exists(master_globals):\n            print('Cant find {}'.format(master_globals))\n        # see if we have a company master globals.\n        try:\n            company_globals = os.path.join(self.root_folder, self.company, 'config', 'master', 'globals.json')\n            if os.path.exists(company_globals):\n                master_globals = company_globals\n        except TypeError:\n            pass\n\n        # see if we have project globals.\n        try:\n            if not self.project:\n                project = 'master'\n            else:\n                project = self.project\n            project_globals = os.path.join(self.root_folder, self.company, 'config', project, 'globals.json')\n            if os.path.exists(project_globals):\n                master_globals = project_globals\n        except TypeError:\n            pass\n        self.project_config_file = master_globals\n        self.globals_root = os.path.dirname(master_globals)\n        self.sync_config_file = os.path.join(self.globals_root, 'sync', 'team.json')\n        self.css_folder = os.path.join(self.globals_root, 'css')\n        self.default_files_folder = os.path.join(self.globals_root, 'default_files')\n        self.hdri_folder = os.path.join(self.globals_root, 'hdri')\n        self.hdri_settings_file = os.path.join(self.hdri_folder, 'settings.json')\n        self.cookbook_folder = os.path.join(self.globals_root, 'cookbook')\n        self.shaders_config_file = os.path.join(self.globals_root, 'shaders.json')\n\n    def print_variables(self):\n        for elem in self.__dict__:\n            print('{}: {}'.format(elem, self.__dict__[elem]))\n\n    def get_user_globals(self):\n        # do they have an env variable\n        try:\n            if os.path.exists(self.user_config_file):\n                self.user_config = load_json(self.user_config_file)\n        except TypeError:\n            print('No cgl_user_globals ENV variable found. 
Assuming location.')\n            if os.path.exists(os.path.join(os.path.expanduser('~\\\\Documents'), 'cglumberjack', 'user_globals.json')):\n                self.user_config = load_json(os.path.join(os.path.expanduser('~\\\\Documents'), 'cglumberjack', 'user_globals.json'))\n            else:\n                print('No Globals Found at %s:' % os.path.join(os.path.expanduser('~\\\\Documents'), 'cglumberjack',\n                                                               'user_globals.json'))\n\n    def edit_project_config(self, key_list, value):\n        \"\"\"\n        edits the current globals file given a key list and a value.\n        :param key_list: list of strings representing a possibly nested key\n        :param value:\n        :return:\n        \"\"\"\n        temp_dict = self.project_config\n        last = key_list[-1]\n        for each in key_list:\n            if each == last:\n                temp_dict[each] = value\n            temp_dict = temp_dict[each]\n        self.save_project_config()\n\n    def edit_user_config(self, key_list, value):\n        \"\"\"\n\n        :param key_list:\n        :param value:\n        :return:\n        \"\"\"\n        temp_dict = self.user_config\n        length = len(key_list)\n        if length == 1:\n            temp_dict[key_list[0]] = value\n        elif length == 2:\n            temp_dict[key_list[0]][key_list[1]] = value\n        elif length == 3:\n            temp_dict[key_list[0]][key_list[1]][key_list[2]] = value\n        elif length == 4:\n            temp_dict[key_list[0]][key_list[1]][key_list[2]][key_list[3]] = value\n        elif length == 5:\n            print('Yikes, can we even handle 5 on this function edit_user_config()')\n        self.save_user_config(temp_dict)\n\n    def edit_shader_config(self, key_list, value):\n        \"\"\"\n\n        :param key_list:\n        :param value:\n        :return:\n        \"\"\"\n        temp_dict = self.shaders_config\n        last = key_list[-1]\n        for each in key_list:\n            if each == last:\n                temp_dict[each] = value\n            temp_dict = temp_dict[each]\n        self.save_shader_config()\n\n    def save_project_config(self, project_config_dict=None):\n        if not project_config_dict:\n            project_config_dict = self.project_config  # default to the loaded config so edit_project_config() can save\n        print('Saving project config...')\n        save_json(self.project_config_file, project_config_dict)\n\n    def save_user_config(self, user_config_dict=None):\n        if not user_config_dict:\n            user_config_dict = self.user_config\n        save_json(self.user_config_file, user_config_dict)\n\n    def save_shader_config(self, shader_config_dict=None):\n        if not shader_config_dict:\n            shader_config_dict = self.shaders_config\n        save_json(self.shaders_config_file, shader_config_dict)\n\n    def get_project_config(self):\n        \"\"\"\n        returns a dictionary for the current project config globals.\n        :return:\n        \"\"\"\n        if os.path.exists(self.project_config_file):\n            self.project_config = load_json(self.project_config_file)\n            return self.project_config\n        else:\n            print('Project Config {} does not exist'.format(self.project_config_file))\n\n    def get_shaders_config(self):\n        \"\"\"\n        returns a shader dictionary for use in shading tools.\n        :return:\n        \"\"\"\n        self.shaders_config = load_json(self.shaders_config_file)\n        return self.shaders_config\n\n    def test_string_against_rules(self, test_string, rule, effected_label=None):\n        \"\"\"\n        Test for any string to see if it passes any regex \"rule\" from the global.yaml file.\n        :param test_string: string to be tested against regex\n        :param rule: regex pattern to test against\n        :param effected_label: PySide Label Object to effect color of.\n        :return:\n        \"\"\"\n        regex = re.compile(r'%s' % self.project_config['rules']['path_variables'][rule]['regex'])\n        if re.findall(regex, test_string):\n            if effected_label:\n                effected_label.setStyleSheet(\"color: rgb(255, 255, 255);\")\n            return False\n        else:\n            if effected_label:\n                effected_label.setStyleSheet(\"color: rgb(255, 50, 50);\")\n            return 
self.project_config['rules']['path_variables'][rule]['example']\n\n\n def image_path(self, image=None, ):\n \"\"\"\n get the path where images are stored\n :param image:\n :return:\n \"\"\"\n if image:\n return os.path.join(self.images_folder, image)\n else:\n return self.images_folder\n\n def icon_path(self, icon=None):\n \"\"\"\n get the path where icons are stored.\n :param icon:\n :return:\n \"\"\"\n if icon:\n return os.path.join(self.paths['code_root'], 'resources', 'icons', icon)\n else:\n return os.path.join(self.paths['code_root'], 'resources', 'icons')\n\n def font_path(self):\n \"\"\"\n get the path where fonts for the app are stored\n :return:\n \"\"\"\n return self.app_font_folder\n\n def get_cgl_resources_path(self):\n \"\"\"\n get the resources path\n :return: path string\n \"\"\"\n return os.path.join(self.paths['code_root'], 'resources')\n\n def get_task_default_file(self, task):\n \"\"\"\n returns the path to the default file of the given task\n :param task:\n :return:\n \"\"\"\n task_folder = os.path.join(self.default_files_folder, task)\n default_file = glob.glob('{}/default.*'.format(task_folder))\n if default_file:\n return os.path.join(task_folder, default_file[0])\n else:\n return None\n\n\ndef copy_config(from_company, from_project, to_company, to_project):\n from_config = ProjectConfig(company=from_company, project=from_project).globals_root\n to_config_root = os.path.join(get_root(from_project), to_company, 'config', to_project)\n print('Copying from {} to {}'.format(from_config, to_config_root))\n cgl_copy(from_config, to_config_root)\n\n\ndef check_for_latest_master(path_object=None):\n from cgl.core.utils.general import cgl_execute\n # TODO - need to look at this and make it require cfg if possible.\n # TODO - probably need something in place to check if git is installed.\n cfg = ProjectConfig(path_object)\n code_root = paths()['code_root']\n command = 'git remote show origin'\n os.chdir(code_root)\n output = cgl_execute(command, return_output=True, print_output=False)['printout']\n\n for line in output:\n if 'pushes to master' in line:\n if 'up to date' in line:\n print('cglumberjack code base up to date')\n return True\n else:\n print('cglumberjack code base needs updated')\n return False\n\n\ndef update_master(path_object=None, widget=None):\n from cgl.core.utils.general import cgl_execute\n # TODO - need to look at this and make it require cfg if possible.\n cfg = ProjectConfig(path_object)\n code_root = paths()['code_root']\n command = 'git pull'\n os.chdir(code_root)\n cgl_execute(command)\n if widget:\n widget.close()\n\n\ndef get_root(project='master'):\n \"\"\"\n gets root from the current project defaults to 'master'.\n :return:\n \"\"\"\n user_conf = user_config()\n return user_conf['paths']['root'].replace('\\\\', '/')\n\n\n\n\n\n\nif __name__ == '__main__':\n print('bob')\n #create_user_globals(root=None)\n # project_config = ProjectConfig(company='bob')\n # print(project_config.globals_root)\n # these return file paths\n # print(project_config.globals_file)\n # print(project_config.shaders_file)\n # print(project_config.user_globals_file)\n # # these return dictionaries for common things\n # print(project_config.user_config)\n # print(project_config.project_config)\n # # this will return a dictionary for the shaders.\n # 
print(project_config.get_shaders_config())\n","sub_path":"cgl/core/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":13606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"149986998","text":"import time, os, sys, gc\nimport multiprocessing as mp\nfrom glob import glob\nfrom copy import copy\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nsys.path.insert(0, 'ZZ4b/nTupleAnalysis/scripts/') #https://github.com/patrickbryant/PlotTools\nimport matplotlibHelpers as pltHelper\nfrom functools import partial\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-d', '--data', default='/uscms/home/bryantp/nobackup/ZZ4b/data2018/picoAOD.h5', type=str, help='Input dataset file in hdf5 format')\nparser.add_argument('--data4b', default=None, help=\"Take 4b from this file if given, otherwise use --data for both 3-tag and 4-tag\")\nparser.add_argument('-t', '--ttbar', default='', type=str, help='Input MC ttbar file in hdf5 format')\nparser.add_argument('--ttbar4b', default=None, help=\"Take 4b ttbar from this file if given, otherwise use --ttbar for both 3-tag and 4-tag\")\nparser.add_argument('-s', '--signal', default='', type=str, help='Input dataset file in hdf5 format')\nparser.add_argument('-o', '--outdir', default='.', type=str, help='outputDirectory')\nparser.add_argument('--weightName', default=\"mcPseudoTagWeight\", help='Which weights to use for JCM.')\nparser.add_argument('--FvTName', default=\"FvT\", help='Which weights to use for FvT.')\nargs = parser.parse_args()\n\nlock = mp.Lock()\ndef getFrame(fileName, selection='', PS=None, weight='mcPseudoTagWeight'):\n yearIndex = fileName.find('201')\n year = float(fileName[yearIndex:yearIndex+4])\n #print(\"Reading\",fileName)\n thisFrame = pd.read_hdf(fileName, key='df')\n thisFrame['year'] = pd.Series(year*np.ones(thisFrame.shape[0], dtype=np.float32), index=thisFrame.index)\n n = thisFrame.shape[0]\n if PS:\n keep_fraction = 1/PS\n print(\"Only keep %f of threetag\"%keep_fraction)\n #lock.acquire()\n #np.random.seed(n)\n keep = (thisFrame.fourTag) | (np.random.rand(thisFrame.shape[0]) < keep_fraction) # a random subset of t3 events will be kept set\n #np.random.seed(0)\n #lock.release()\n keep_fraction = (keep & ~thisFrame.fourTag).sum()/(~thisFrame.fourTag).sum() # update keep_fraction with actual fraction instead of target fraction\n print(\"keep fraction\",keep_fraction)\n thisFrame = thisFrame[keep]\n thisFrame.loc[~thisFrame.fourTag, weight] = thisFrame[~thisFrame.fourTag][weight] / keep_fraction\n\n if selection:\n thisFrame = thisFrame.loc[eval(selection.replace('df','thisFrame'))]\n\n n_after = thisFrame.shape[0]\n print(\"Read\",fileName,year,n,'->',n_after, n_after/n)\n\n return thisFrame\n\ndef getFramesHACK(fileReaders,getFrame,dataFiles,PS=None, selection='', weight='mcPseudoTagWeight'):\n largeFiles = []\n print(\"dataFiles was:\",dataFiles)\n # for d in dataFiles:\n # if Path(d).stat().st_size > 2e9:\n # print(\"Large File\",d)\n # largeFiles.append(d)\n # dataFiles.remove(d)\n\n results = fileReaders.map_async(partial(getFrame, PS=PS, selection=selection, weight=weight), sorted(dataFiles))\n #results = fileReaders.map_async(getFrame, sorted(dataFiles))\n frames = results.get()\n\n for f in largeFiles:\n frames.append(getFrame(f))\n\n gc.collect()\n return frames\n\n\n\n\noutputDir = args.outdir\nif not os.path.isdir(outputDir):\n print(\"Making output 
dir\",outputDir)\n os.mkdir(outputDir)\n\nfileReaders = mp.Pool(10)\n\nweightName = args.weightName\nprint(\"Using JCM weight with name: \",weightName)\n\nFvTName = args.FvTName\nprint(\"Using FvT weight with name: \",FvTName)\n\n\nclass nameTitle:\n def __init__(self,name,title):\n self.name = name\n self.title= title\n\nclass classInfo:\n def __init__(self, abbreviation='', name='', index=None, color=''):\n self.abbreviation = abbreviation\n self.name = name\n self.index = index\n self.color = color\n\nd4 = classInfo(abbreviation='d4', name= 'FourTag Data', index=0, color='red')\nd3 = classInfo(abbreviation='d3', name= 'ThreeTag Data', index=1, color='orange')\nt4 = classInfo(abbreviation='t4', name= r'FourTag $t\\bar{t}$', index=2, color='green')\nt3 = classInfo(abbreviation='t3', name=r'ThreeTag $t\\bar{t}$', index=3, color='cyan')\nzz = classInfo(abbreviation='zz', name=r'$ZZ$ MC $\\times100$', index=4, color='blue')\nzh = classInfo(abbreviation='zh', name=r'$ZH$ MC $\\times100$', index=5, color='violet')\n\ndfs = []\n\nselection = 'df.passMDRs & df.passHLT & ~(df.SR & df.fourTag)'\n\n# Read .h5 files\ndataFiles = glob(args.data)\nif args.data4b:\n dataFiles += glob(args.data4b) \n\nframes = getFramesHACK(fileReaders,getFrame,dataFiles, selection=selection, weight=args.weightName)\n\ndfD = pd.concat(frames, sort=False)\n\nprint(\"Add true class labels to data\")\ndfD['d4'] = dfD.fourTag\ndfD['d3'] = ~dfD.fourTag\ndfD['t4'] = False\ndfD['t3'] = False\ndfD['zz'] = False\ndfD['zh'] = False\n\ndfs.append(dfD)\n\n# Read .h5 files\nttbarFiles = glob(args.ttbar)\nif args.ttbar4b:\n ttbarFiles += glob(args.ttbar4b) \n\n\nselection = 'df.passMDRs & df.passHLT'\n\nframes = getFramesHACK(fileReaders,getFrame,ttbarFiles, PS=10, selection=selection, weight=args.weightName)\ndfT = pd.concat(frames, sort=False)\n\nprint(\"Add true class labels to ttbar MC\")\ndfT['t4'] = dfT.fourTag\ndfT['t3'] = ~dfT.fourTag\ndfT['d4'] = False\ndfT['d3'] = False\ndfT['zz'] = False\ndfT['zh'] = False\n\ndfs.append(dfT)\n\nif args.signal:\n frames = []\n for fileName in sorted(glob(args.signal)):\n yearIndex = fileName.find('201')\n year = float(fileName[yearIndex:yearIndex+4])\n print(\"Reading\",fileName)\n thisFrame = pd.read_hdf(fileName, key='df')\n print(\"Add year to dataframe\",year)#,\"encoded as\",(year-2016)/2)\n thisFrame['year'] = pd.Series(year*np.ones(thisFrame.shape[0], dtype=np.float32), index=thisFrame.index)\n print(\"Add true class labels to signal\")\n if \"ZZ4b201\" in fileName: \n index = zz.index\n thisFrame['zz'] = thisFrame.fourTag\n thisFrame['zh'] = False\n if \"ZH4b201\" in fileName: \n index = zh.index\n thisFrame['zz'] = False\n thisFrame['zh'] = thisFrame.fourTag\n thisFrame['t4'] = False\n thisFrame['t3'] = False\n thisFrame['d4'] = False\n thisFrame['d3'] = False\n frames.append(thisFrame)\n dfS = pd.concat(frames, sort=False)\n dfs.append(dfS)\n\n\nprint(\"concatenate dataframes\")\ndf = pd.concat(dfs, sort=False)\n\n\ndef setIndex(dataFrame):\n i = pd.RangeIndex(dataFrame.shape[0])\n dataFrame.set_index(i, inplace=True) \n\n\nclass dataFrameOrganizer:\n def __init__(self, dataFrame):\n self.df = dataFrame\n self.dfSelected = dataFrame\n self.dfd4 = None\n self.dfd3 = None\n self.dft4 = None\n self.dft3 = None\n self.dfbg = None\n self.dfzz = None\n self.dfzh = None\n self.dfsg = None\n\n def applySelection(self, selection):\n print(\"Apply selection\")\n self.dfSelected = self.df.loc[ selection ]\n print('Split by class')\n \n self.dfd4 = self.dfSelected.loc[ self.dfSelected.d4 ]\n 
self.dfd3 = self.dfSelected.loc[ self.dfSelected.d3 ]\n self.dft4 = self.dfSelected.loc[ self.dfSelected.t4 ]\n self.dft3 = self.dfSelected.loc[ self.dfSelected.t3 ]\n self.dfbg = self.dfSelected.loc[ (self.dfSelected.d3) | (self.dfSelected.t4) ]\n if args.signal:\n self.dfzz = self.dfSelected.loc[ self.dfSelected.zz ]\n self.dfzh = self.dfSelected.loc[ self.dfSelected.zh ]\n self.dfsg = self.dfSelected.loc[ (self.dfSelected.zz) | (self.dfSelected.zh) ]\n print('Garbage collect')\n gc.collect()\n\n def plotVar(self, var, bins=None, xmin=None, xmax=None, ymin=None, ymax=None, reweight=False, variance=False, overflow=False):\n\n d3t3Weights = None\n d3t4Weights = None\n ttbarErrorWeights = None\n if reweight:\n ttbarWeights = -getattr(self.dft3,weightName) * getattr(self.dft3,FvTName)\n multijet = self.dfd3[var]\n multijetWeights = getattr(self.dfd3,weightName) * getattr(self.dfd3,FvTName)\n background = np.concatenate((self.dfd3[var], self.dft4[var]))\n backgroundWeights = np.concatenate((getattr(self.dfd3,weightName) * getattr(self.dfd3,FvTName), getattr(self.dft4,weightName)))\n # ttbar estimates from reweighted threetag data\n d3t3Weights = -1 * multijetWeights * getattr(self.dfd3,'FvT_pt3') / getattr(self.dfd3,'FvT_pd3')\n d3t4Weights = getattr(self.dfd3,weightName) * getattr(self.dfd3,'FvT_pt4') / getattr(self.dfd3,'FvT_pd3')\n ttbarErrorWeights = np.concatenate( (getattr(self.dft4,weightName), -d3t4Weights, ttbarWeights, -d3t3Weights) )\n ttbarError = np.concatenate( ( self.dft4[var], self.dfd3[var], self.dft3[var], self.dfd3[var] ) )\n else:\n ttbarWeights = -getattr(self.dft3,weightName)\n multijet = np.concatenate((self.dfd3[var], self.dft3[var]))\n multijetWeights = np.concatenate((getattr(self.dfd3,weightName), -getattr(self.dft3,weightName)))\n background = np.concatenate((self.dfd3[var], self.dft3[var], self.dft4[var]))\n backgroundWeights = np.concatenate((getattr(self.dfd3,weightName), -getattr(self.dft3,weightName), getattr(self.dft4,weightName)))\n\n self.dsd4 = pltHelper.dataSet(name=d4.name, \n points =self.dfd4[var],\n weights=getattr(self.dfd4,weightName), \n color=d4.color, alpha=1.0, linewidth=1)\n self.bkgd = pltHelper.dataSet(name='Background Model', \n points =background,\n weights=backgroundWeights, \n color='brown', alpha=1.0, linewidth=1)\n self.dst4 = pltHelper.dataSet(name=t4.name, \n points =self.dft4[var],\n weights=getattr(self.dft4,weightName), \n color=t4.color, alpha=1.0, linewidth=1)\n self.dsm3 = pltHelper.dataSet(name='ThreeTag Multijet', \n points =multijet,\n weights=multijetWeights,\n color=d3.color, alpha=1.0, linewidth=1)\n self.dst3 = pltHelper.dataSet(name=t3.name, \n points=self.dft3[var],\n weights=ttbarWeights,\n color=t3.color, alpha=1.0, linewidth=1)\n\n datasets = [self.dsd4,self.bkgd,self.dst4,self.dsm3,self.dst3]\n if variance:\n self.dsm3_variance = pltHelper.dataSet(name='3b MJ Weight SD', \n points =multijet,\n weights=multijetWeights * getattr(self.dfd3,FvTName+'_std'),\n color=d3.color, alpha=0.5, linewidth=1)\n datasets += [self.dsm3_variance]\n\n if d3t3Weights is not None:\n self.dsd3t3 = pltHelper.dataSet(name =r'ThreeTag $t\\bar{t}$ est.',\n points =self.dfd3[var],\n weights=d3t3Weights,\n color=t3.color, alpha=0.5, linewidth=2)\n datasets += [self.dsd3t3]\n\n if d3t4Weights is not None:\n self.dsd3t4 = pltHelper.dataSet(name =r'FourTag $t\\bar{t}$ est.',\n points =self.dfd3[var],\n weights=d3t4Weights,\n color=t4.color, alpha=0.5, linewidth=2)\n datasets += [self.dsd3t4]\n\n if ttbarErrorWeights is not None:\n self.dste = 
pltHelper.dataSet(name =r'$t\\bar{t}$ MC - $t\\bar{t}$ est.',\n points =ttbarError,\n weights=ttbarErrorWeights,\n color='black', alpha=0.5, linewidth=2)\n datasets += [self.dste]\n\n if self.dfzz is not None:\n self.dszz = pltHelper.dataSet(name=zz.name,\n points=self.dfzz[var],\n weights=getattr(self.dfzz,weightName)*100,\n color=zz.color, alpha=1.0, linewidth=1)\n datasets += [self.dszz]\n\n if self.dfzh is not None:\n self.dszh = pltHelper.dataSet(name=zh.name,\n points=self.dfzh[var],\n weights=getattr(self.dfzh,weightName)*100,\n color=zh.color, alpha=1.0, linewidth=1)\n datasets += [self.dszh]\n\n if type(bins)!=list:\n if not bins: bins=50\n if type(xmin)==type(None): xmin = self.dfSelected[var].min()\n if type(xmax)==type(None): xmax = self.dfSelected[var].max()\n width = (xmax-xmin)/bins\n bins = [xmin + b*width for b in range(0,bins+1)]\n\n if reweight:\n chisquare = pltHelper.histChisquare(obs=self.dsd4.points, obs_w=self.dsd4.weights,\n exp=self.bkgd.points, exp_w=self.bkgd.weights,\n bins=bins, overflow=overflow)\n\n args = {'dataSets': datasets,\n 'ratio': [0,1],\n 'ratioRange': [0.9,1.1] if reweight else [0.5, 1.5],\n 'ratioTitle': 'Data / Model',\n 'bins': bins,\n 'xmin': xmin,\n 'xmax': xmax,\n 'ymin': ymin,\n 'ymax': ymax,\n 'xlabel': var.replace('_',' '),\n 'ylabel': 'Events / Bin',\n 'overflow': overflow,\n }\n fig = pltHelper.histPlotter(**args)\n if reweight:\n fig.sub1.annotate('$\\chi^2/$NDF = %1.2f (%1.0f$\\%%$)'%(chisquare.chi2/chisquare.ndfs, chisquare.prob*100), (1.0,1.02), horizontalalignment='right', xycoords='axes fraction')\n figName = outputDir + \"/\"+var+('_reweight' if reweight else '')+'.pdf'\n fig.savefig(figName)\n print(figName)\n\n def hist2d(self, dfName, xvar, yvar ,bins=50,range=None,reweight=False): # range = [[xmin, xmax], [ymin, ymax]]\n df = getattr(self,dfName)\n x,y = df[xvar],df[yvar]\n if reweight:\n weights = getattr(df,weightName) * (getattr(df,FvTName) * (~df.fourTag) + df.fourTag)\n else:\n weights = getattr(df,weightName)\n xlabel = xvar.replace('_',' ')\n ylabel = yvar.replace('_',' ')\n args = {'x':x, 'y':y, 'weights':weights,\n 'xlabel': xlabel,\n 'ylabel': ylabel,\n 'zlabel': 'Events / Bin',\n 'bins': bins,\n 'range': range,\n }\n fig = pltHelper.hist2d(**args)\n figName = outputDir +\"/\"+dfName+\"_\"+xvar+\"_vs_\"+yvar+(\"_reweight\" if reweight else \"\")+\".pdf\"\n fig.savefig(figName)\n print(figName)\n\n\n# print(\"Blind 4 tag SR\")\n# df = df.loc[ (~df.SR) | (~df.d4) ]\n\ndfo = dataFrameOrganizer(df)\n\n# print(\"dfo.applySelection( dfo.df.passHLT & dfo.df.passMDRs )\")\n# dfo.applySelection( dfo.df.passHLT & dfo.df.passMDRs )\n\n#\n# Example plots\n#\nprint(\"Example commands:\")\nprint(\"dfo.applySelection( ~dfo.df.SR )\")\nprint(\"dfo.plotVar('dRjjOther', reweight=True)\")\nprint(\"dfo.hist2d('dfbg', 'canJet0_eta', 'FvT')\")\n# dfo.plotVar('dRjjOther')\n# dfo.plotVar('dRjjOther', reweight=True)\n# dfo.hist2d('dfbg', 'canJet0_eta', 'FvT')\n\n# #dfo.df['SvB_q_max'] = dfo.df[['SvB_q_1234', 'SvB_q_1324', 'SvB_q_1423']].idxmax(axis=1)\n# SvB_q_score = dfo.df[['SvB_q_1234', 'SvB_q_1324', 'SvB_q_1423']].values\n# FvT_q_score = dfo.df[['FvT_q_1234', 'FvT_q_1324', 'FvT_q_1423']].values\n# SvB_q_max = np.amax(SvB_q_score, axis=1, keepdims=True)\n# FvT_q_max = np.amax(FvT_q_score, axis=1, keepdims=True)\n# events, SvB_q_max_index = np.where(SvB_q_score==SvB_q_max)\n# events, FvT_q_max_index = np.where(FvT_q_score==FvT_q_max)\n# dfo.df['SvB_q_max_index'] = SvB_q_max_index\n# dfo.df['FvT_q_max_index'] = FvT_q_max_index\n# 
FvT_q_at_SvB_q_max_index = FvT_q_score[events, SvB_q_max_index]\n# SvB_q_at_FvT_q_max_index = SvB_q_score[events, FvT_q_max_index]\n# dfo.df['FvT_q_at_SvB_q_max_index'] = FvT_q_at_SvB_q_max_index\n# dfo.df['SvB_q_at_FvT_q_max_index'] = SvB_q_at_FvT_q_max_index\n\n# dfo.applySelection( (dfo.df.passHLT==True) & (dfo.df.SB==True) & (dfo.df.xWt > 2) )\n\n# def plot_q_scores():\n# names = ['SvB_q_1234', 'SvB_q_1324', 'SvB_q_1423', 'FvT_q_1234', 'FvT_q_1324', 'FvT_q_1423']\n# for name in names:\n# dfo.plotVar(name, xmin=0, xmax=1, bins=20, reweight=True)\n# dfo.plotVar('SvB_q_max_index', xmin=-0.5, xmax=2.5, bins=3, reweight=True)\n# dfo.plotVar('FvT_q_max_index', xmin=-0.5, xmax=2.5, bins=3, reweight=True)\n# dfo.plotVar('FvT_q_at_SvB_q_max_index', xmin=0, xmax=1, bins=20, reweight=True) \n# dfo.plotVar('SvB_q_at_FvT_q_max_index', xmin=0, xmax=1, bins=20, reweight=True) \n\n# plot_q_scores()\n\n\n# get good example events for illustration of classifier response\n# dfo.applySelection( (dfo.df.passHLT==True) )\n# Get Year of most signal like event\n# dfo.dfzh[ dfo.dfzh.SvB_pzh.max() == dfo.dfzh.SvB_pzh ].year\n","sub_path":"nTupleAnalysis/scripts/plotHDF5.py","file_name":"plotHDF5.py","file_ext":"py","file_size_in_byte":17675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90268029","text":"import math\nimport datetime\nimport pandas as pd\nimport numpy as np\nimport os\n\ndef warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn\n\n\n#풍향/풍속 인코딩\ndef wind_cos_sin(df):\n wind_dir = df['Wind Direction(16)']\n wind_dir_deg = np.deg2rad(wind_dir)\n\n wind_dir_cos = wind_dir_deg.apply(math.cos)\n wind_dir_sin = wind_dir_deg.apply(math.sin)\n\n df['wind_dir_cos'] = wind_dir_cos#.round(1)\n df['wind_dir_sin'] = wind_dir_sin#.round(1)\n \n return df\n\ndef new_wind_speed_direction(df,phi):\n theta = df['Wind Direction(16)']\n wind_speed = df['Wind Speed(m/s)']\n deg = theta - phi\n\n cos_deg = np.deg2rad(deg).apply(math.cos)\n\n new_wind_speed = wind_speed*cos_deg\n\n df['new_wind_speed'] = new_wind_speed#.round(1)\n \n return df\n\n#발전량 moving average\ndef make_moving_average_df(df,hours,year):\n df = df.reset_index(drop=True)\n if year == 0:\n df_cycle = pd.DataFrame(columns=['datetime'])\n df_cycle['datetime'] = df['datetime'].loc[:(24)*365-1]\n for hour in hours:\n name = 'ma'+str(hour)\n\n df_cycle[name]= df['발전량(kW)'].rolling(hour).mean().shift(-hour).loc[year:(year+1)*(24)*365-1]\n \n elif year == 1:\n df_cycle = pd.DataFrame(columns=['datetime'])\n df_cycle['datetime'] = df['datetime'].loc[year*(24)*365:(year+1)*(24)*365-1]\n for hour in hours:\n name = 'ma'+str(hour)\n\n df_cycle[name]= df['발전량(kW)'].rolling(hour).mean().shift(-hour).loc[24*365:2*(24)*365-1]\n \n \n return df_cycle.reset_index(drop=True)\n\n#발전량 moving average feature추가 \ndef load_power_ma_forecast(df_target_date, df_ma, hour):\n \n if df_target_date.month >= 7 :\n year = 2017\n elif df_target_date.month < 7 :\n year = 2018\n \n target = datetime.datetime(year,\n df_target_date.month, \n df_target_date.day, \n df_target_date.hour,\n 0,\n 0)\n \n \n name = 'ma'+str(hour)+'_'+str(year)\n \n try:\n return float(df_ma[df_ma['datetime'] == target][name])\n except Exception as e :\n return -1\n\ndef load_power_ma_forecast_mean(df_target_date, df_ma, hour):\n \n year = 2017\n \n target = datetime.datetime(year,\n df_target_date.month, \n df_target_date.day, \n df_target_date.hour,\n 0,\n 0)\n \n name = 'ma'+str(hour)+'_mean'\n try:\n return 
float(df_ma[df_ma['datetime'] == target][name])\n\n except Exception as e :\n return -1\n\n \n# Feature Windowing\ndef fe_add_timestep(df_original, num_timestep) : \n \n num_timestep = num_timestep//3\n df = df_original.copy()\n df_shifted = df_original.copy()\n df_shifted = df_shifted.shift(num_timestep)\n\n ### previous, later 함수 대상으로는 돌지 않게\n lst = list(df.columns)\n lst2 = list(df.columns)\n for col in lst:\n if 'previous' in col :\n lst2.remove(col)\n for col in lst:\n if 'later' in col :\n lst2.remove(col)\n columns = lst2\n columns.remove('date')\n columns.remove('date(forecast)')\n columns.remove('datetime')\n columns.remove('datetime(forecast)')\n columns.remove('Power Generation(kW)+0')\n columns.remove('Power Generation(kW)+1')\n columns.remove('Power Generation(kW)+2')\n columns.remove('location')\n \n num_timestep = num_timestep*3\n for column in columns :\n df[column+' (previous %d)'%num_timestep] = df_shifted[column]\n df = df[df['Celsius(Lowest) (previous %d)'%num_timestep].notnull()]\n \n return df\n\n#add feature moving average\ndef fe_add_previous_n_hours_mean_kpx(df_original, columns, how_long=1):\n df = df_original.copy()\n \n a = df['datetime(forecast)'].loc[0]\n \n n = df[df['datetime(forecast)'] == datetime.datetime(a.year, a.month, a.day, a.hour+15)].index[0]-1\n how_long = how_long*n\n \n for column in columns :\n df[column+'('+str(n*12)+' hours mean)'] = 0\n for idx in range(how_long) :\n df[column+'('+str(n*12)+'hours mean)'] += df[column].shift(idx+1)\n df[column+'('+str(n*12)+' hours mean)'] /= how_long\n\n df[column+'('+str(n*12)+'hours mean)'] = df[column+'('+str(n*12)+'hours mean)'].astype(float).round(1)\n df = df[how_long:] \n\n return df\n\ndef add_time_feature(df):\n df['month'] = df['datetime'].dt.month\n df['day'] = df['datetime'].dt.day\n df['hour'] = df['datetime'].dt.hour\n \n return df","sub_path":"models/functions/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123976748","text":"# \n# version_increment.py - Simple versioning script for Platformio\n# \n# Copyright (C) 2020 Davide Perini\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy of \n# this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n# copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in \n# all copies or substantial portions of the Software.\n# \n# You should have received a copy of the MIT License along with this program. 
\n# If not, see .\n# \n\nimport sys\nimport datetime\nimport os \n\n## DO NOT EDIT THIS FILE, edit version file if you want to start from a different version\nBUILD_NUMBER = 'version'\nVERSION_FILE = 'Version.h'\nversion = '0.1.'\n\n## Increment version during the upload stage only\nupload = False\nn = len(sys.argv) \n\nfor i in range(1, n): \n    if sys.argv[i] == \"upload\":\n        upload = True; \n\nif upload:\n    \n    print(\"Version Increment Script ARGS=\")\n    print (sys.argv[1:]) \n\n    build_no = 0\n\n    try:\n        with open(BUILD_NUMBER) as f:\n            build_no = f.readline()\n            version = build_no[0:build_no.rindex('.')+1]\n            build_no = int(build_no[build_no.rindex('.')+1:]) + 1 \n    except:\n        print('No version file found or incorrect data in it. Starting from 0.1.0')\n        build_no = 1\n    with open(BUILD_NUMBER, 'w+') as f:\n        f.write(version + str(build_no))\n        print('Build number: {}'.format(version + str(build_no)))\n\n    hf = \"\"\"\n    // AUTO GENERATED FILE FROM version_increment.py, DO NOT EDIT THIS FILE\n    #ifndef VERSION\n    #define VERSION \"{}\"\n    #endif\n    #ifndef BUILD_TIMESTAMP\n    #define BUILD_TIMESTAMP \"{}\"\n    #endif\n    \"\"\".format(version + str(build_no), datetime.datetime.now(), version+str(build_no))\n\n    if (os.environ.get('PLATFORMIO_INCLUDE_DIR') != None):\n        VERSION_FILE = os.environ.get('PLATFORMIO_INCLUDE_DIR') + \"/\" + VERSION_FILE\n    elif os.path.exists(\"include\"):\n        VERSION_FILE = \"include/\" + VERSION_FILE\n    \n    with open(VERSION_FILE, 'w+') as f:\n        f.write(hf)\nelse: \n    print(\"Version Increment Script. Nothing to do. ARGS=\")\n    print (sys.argv[1:])","sub_path":"version_increment.py","file_name":"version_increment.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635827933","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport time\nfrom ..items import AqiItem\n\n\nclass SpiderAqiSpider(scrapy.Spider):\n    name = 'spider_aqi'\n    allowed_domains = ['aqistudy.cn']\n    start_urls = ['https://www.aqistudy.cn/historydata/']\n\n    def parse(self, response):\n        city_list = response.xpath('//ul[@class=\"unstyled\"]/div[2]/li/a')\n        for city in city_list:\n            # Handle each city's info\n            item = AqiItem()\n            item['city'] = city.xpath('./text()').extract_first()\n            item['time_stamp'] = time.time()\n            link = response.urljoin(city.xpath('./@href').extract_first())\n            print(item, link)\n            yield scrapy.Request(link, self.parse_month, meta={'item':item})\n\n    def parse_month(self, response):\n        \"\"\"Handle each month's info\"\"\"\n        month_link_url = response.xpath('//ul[@class=\"unstyled1\"]/li/a/@href').extract()\n        item = response.meta['item']\n        for month_link in month_link_url:\n            link = 'https://www.aqistudy.cn/historydata/' + month_link\n            item['data_link'] = link\n            yield scrapy.Request(link, self.parse_day, meta={'item':item})\n\n    def parse_day(self, response):\n        \"\"\"Handle each day's detailed info\"\"\"\n        day_info_list = response.xpath('//tbody/tr')\n        item = response.meta['item']\n        for day_info in day_info_list[1:]:\n            item['date'] = day_info.xpath('./td[1]/text()').extract_first()\n            item['aqi'] = day_info.xpath('./td[2]/text()').extract_first()\n            item['level'] = day_info.xpath('./td[3]/span/text()').extract_first()\n            item['pm2_5'] = day_info.xpath('./td[4]/text()').extract_first()\n            item['pm10'] = day_info.xpath('./td[5]/text()').extract_first()\n            item['so2'] = day_info.xpath('./td[6]/text()').extract_first()\n            item['co'] = day_info.xpath('./td[7]/text()').extract_first()\n            item['no2'] = day_info.xpath('./td[8]/text()').extract_first()\n            item['o3_8h'] = 
day_info.xpath('./td[9]/text()').extract_first()\n yield item","sub_path":"aqi/aqi/spiders/spider_aqi.py","file_name":"spider_aqi.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"228662838","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/jhunk/Downloads/pandokia/pandokia/pcgi_treewalk.py\n# Compiled at: 2018-06-04 12:38:26\n# Size of source mod 2**32: 23465 bytes\nimport sys, cgi, re, copy, time, os\ntry:\n from html import escape\n from urllib.parse import urlencode\nexcept ImportError:\n from cgi import escape\n from urllib import urlencode\n\nimport pandokia\npdk_db = pandokia.cfg.pdk_db\nimport pandokia.text_table as text_table, pandokia.pcgi\nfrom . import common\nremove_arrow = '<'\ndebug_cmp = 0\n\ndef get_form(form, value, default):\n if value in form:\n return form.getvalue(value)\n else:\n return default\n\n\ndef treewalk():\n form = pandokia.pcgi.form\n output = sys.stdout\n output.write(common.cgi_header_html)\n output.write(common.page_header())\n if 'test_name' in form:\n test_name = form.getvalue('test_name')\n if test_name == '':\n test_name = '*'\n else:\n test_name = '*'\n context = form.getvalue('context', '*')\n host = form.getvalue('host', '*')\n test_run = form.getvalue('test_run', '*')\n project = form.getvalue('project', '*')\n status = form.getvalue('status', '*')\n attn = form.getvalue('attn', '*')\n qid = form.getvalue('qid', None)\n debug_cmp = form.getvalue('debug_cmp', 0)\n cmp_test_run = form.getvalue('cmp_test_run', None)\n cmp_context = form.getvalue('cmp_context', None)\n cmp_host = form.getvalue('cmp_host', None)\n test_run = common.find_test_run(test_run)\n if cmp_test_run:\n cmp_test_run = common.find_test_run(cmp_test_run)\n comparing = 0\n if 'compare' in form:\n comparing = 1\n x = form.getvalue('compare', '0')\n if x == '' or x == '0' or x.startswith('Turn Off'):\n comparing = 0\n if x.startswith('Reverse'):\n t = cmp_test_run\n cmp_test_run = test_run\n test_run = t\n t = cmp_host\n cmp_host = host\n host = t\n t = cmp_context\n cmp_context = context\n context = t\n comparing = 1\n query = {'test_name':test_name, \n 'test_run':test_run, \n 'project':project, \n 'host':host, \n 'status':status, \n 'attn':attn, \n 'context':context, \n 'compare':comparing}\n if qid is not None:\n qid = int(qid)\n query['qid'] = qid\n if cmp_test_run is not None:\n query['cmp_test_run'] = cmp_test_run\n if cmp_context is not None:\n query['cmp_context'] = cmp_context\n if cmp_host is not None:\n query['cmp_host'] = cmp_host\n header_table = text_table.text_table()\n header_table.set_html_table_attributes(' style=\"font-size: large; font-weight: bold\" ')\n row = 0\n if test_run is None:\n output.write('Tree not generated.
No tests available.')\n return\n if test_run != '*':\n lquery = copy.copy(query)\n lquery['test_run'] = '*'\n test_run_line = '
%s = %s &nbsp;&nbsp;&nbsp; %s &nbsp;&nbsp;&nbsp; %s &nbsp;&nbsp;&nbsp; %s
\\n'\n header_table.set_value(row, 0, 'test_run')\n header_table.set_value(row, 1, '=')\n header_table.set_value(row, 2, escape(test_run))\n header_table.set_value(row,\n 3, html=(common.self_href(lquery, 'treewalk', remove_arrow)))\n tmp2 = common.run_previous(None, test_run)\n tmp3 = common.run_next(None, test_run)\n if tmp2 is not None:\n lquery['test_run'] = tmp2\n tmp2 = common.self_href(lquery, 'treewalk', ' (%s)' % tmp2)\n header_table.set_value(row, 4, html=tmp2)\n else:\n tmp2 = ''\n if tmp3 is not None:\n lquery['test_run'] = tmp3\n tmp3 = common.self_href(lquery, 'treewalk', ' (%s)' % tmp3)\n header_table.set_value(row, 5, html=tmp3)\n else:\n tmp3 = ''\n row = row + 1\n for var, label in (('project', 'project'), ('host', 'host'), ('context', 'context'),\n ('status', 'status')):\n if query[var] != '*':\n lquery = copy.copy(query)\n lquery[var] = '*'\n header_table.set_value(row, 0, label)\n header_table.set_value(row, 1, '=')\n header_table.set_value(row, 2, escape(lquery[var]))\n header_table.set_value(row,\n 3, html=(common.self_href(lquery, 'treewalk', remove_arrow)))\n row = row + 1\n\n if qid is not None:\n header_table.set_value(row, 0, 'QID')\n header_table.set_value(row, 2, str(qid))\n row = row + 1\n else:\n print(header_table.get_html())\n output.write('
Test Prefix: ')\n lquery = copy.copy(query)\n t = test_name\n lst = []\n while True:\n y = re.search('[/.]', t)\n if not y:\n break\n lst.append(t[0:y.start() + 1])\n t = t[y.start() + 1:]\n\n t = ''\n for x in lst:\n t = t + x\n lquery['test_name'] = t + '*'\n line = common.self_href(lquery, 'treewalk', escape(x))\n output.write(line)\n\n if test_name != '*':\n lquery['test_name'] = ''\n output.write('   ')\n output.write(common.self_href(lquery, 'treewalk', remove_arrow))\n output.write(' ')\n output.write('
\\n')\n print(cmp_form(query, comparing))\n print('
')\n lquery = copy.copy(query)\n if comparing:\n t = 'show all (not just different)'\n else:\n t = 'show all'\n show_all_line = common.self_href(lquery, 'treewalk.linkout', t) + ' - '\n lquery['add_attributes'] = 1\n show_all_line += common.self_href(lquery, 'treewalk.linkout', 'with attributes')\n lquery['add_attributes'] = 2\n show_all_line += ' - ' + common.self_href(lquery, 'treewalk.linkout', 'Column Selector')\n output.write(show_all_line)\n output.write('
')\n prefixes = collect_prefixes(query)\n table = collect_table(prefixes, query, comparing)\n if comparing:\n query_2 = query.copy()\n query_2['test_run'] = cmp_test_run\n query_2['host'] = cmp_host\n query_2['context'] = cmp_context\n t2 = collect_table(prefixes, query_2, 1)\n for row in range(0, len(table.rows)):\n for col in range(1, len(table.rows[row].lst)):\n c1 = table.get_cell(row, col)\n c2 = t2.get_cell(row, col)\n try:\n c1v = int(c1.text)\n c2v = int(c2.text)\n except ValueError:\n continue\n\n if c1v == 0:\n if c2v == 0:\n c1.link = None\n diff = c1v - c2v\n if debug_cmp:\n c1.text = '%d - %d = %+d' % (c1v, c2v, diff)\n else:\n if diff == 0:\n c1.text = '0'\n else:\n c1.text = '%+d' % diff\n\n if comparing:\n output.write('
Net difference in counts, this - other
')\n    output.write(table.get_html())\n    output.write('')\n    output.write(show_all_line)\n    output.write('
')\n output.flush()\n if 'qid' in query:\n return\n more_where = None\n for field in ('test_run', 'project', 'context', 'host'):\n if '*' not in query[field]:\n pass\n else:\n lquery = {}\n for x in query:\n if query[x] is not None:\n lquery[x] = query[x]\n\n output.write('
Narrow to %s
' % field)\n tn = test_name\n if not tn.endswith('*'):\n tn = tn + '*'\n where_text, where_dict = pdk_db.where_dict([\n (\n 'test_name', tn),\n (\n 'test_run', test_run),\n (\n 'project', project),\n (\n 'host', host),\n (\n 'context', context),\n (\n 'status', status),\n (\n 'attn', attn)], more_where)\n if more_where is None:\n c = pdk_db.execute('SELECT DISTINCT %s FROM result_scalar %s GROUP BY %s ORDER BY %s' % (\n field, where_text, field, field), where_dict)\n else:\n c = pdk_db.execute('SELECT DISTINCT %s FROM result_scalar, query %s GROUP BY %s ORDER BY %s' % (\n field, where_text, field, field), where_dict)\n for x, in c:\n if x is None:\n pass\n else:\n lquery[field] = x\n output.write(\"\" + x + '
')\n\n output.write('')\n\n\ndef linkout():\n output = sys.stdout\n output.write(common.cgi_header_html)\n output.write(common.page_header())\n if 'MSIE' in os.environ['HTTP_USER_AGENT']:\n output.write('
Internet Explorer fumbles the redirect. Click the link below.
')\n no_redirect = 1\n else:\n no_redirect = 0\n form = pandokia.pcgi.form\n context = form.getvalue('context', '*')\n host = form.getvalue('host', '*')\n test_run = form.getvalue('test_run', '*')\n project = form.getvalue('project', '*')\n status = form.getvalue('status', '*')\n attn = form.getvalue('attn', '*')\n oldqid = form.getvalue('qid', None)\n test_name = form.getvalue('test_name', '*')\n test_run = common.find_test_run(test_run)\n now = time.time()\n expire = now + common.cfg.default_qid_expire_days * 86400\n if pdk_db.next:\n newqid = pdk_db.next('sequence_qid')\n c = pdk_db.execute('INSERT INTO query_id ( qid, time, expires ) VALUES ( :1, :2, :3 ) ', (\n newqid,\n now,\n expire))\n else:\n c = pdk_db.execute('INSERT INTO query_id ( time, expires ) VALUES ( :1, :2 ) ', (now, expire))\n newqid = c.lastrowid\n print('content-type: text/plain\\n')\n print('QID %d' % newqid)\n pdk_db.commit()\n if oldqid is not None:\n print('WITH QID=%d' % int(oldqid))\n more_where = ' qid = %d AND result_scalar.key_id = query.key_id ' % int(oldqid)\n else:\n more_where = None\n where_text, where_dict = pdk_db.where_dict([\n (\n 'test_name', test_name),\n (\n 'test_run', test_run),\n (\n 'project', project),\n (\n 'host', host),\n (\n 'context', context),\n (\n 'status', status),\n (\n 'attn', attn)],\n more_where=more_where)\n if oldqid is None:\n c1 = pdk_db.execute('SELECT key_id FROM result_scalar ' + where_text, where_dict)\n else:\n c1 = pdk_db.execute('SELECT result_scalar.key_id FROM result_scalar, query %s' % where_text, where_dict)\n a = []\n for x in c1:\n key_id, = x\n a.append(key_id)\n\n for key_id in a:\n pdk_db.execute('INSERT INTO query ( qid, key_id ) VALUES ( :1, :2 ) ', (newqid, key_id))\n\n pdk_db.commit()\n url = pandokia.pcgi.cginame + '?query=summary&qid=%s' % newqid\n if 'add_attributes' in form:\n x = int(form.getvalue('add_attributes'))\n if x:\n url += '&show_attr=%d' % x\n if not no_redirect:\n output.write(\"\\n\\n\" % url)\n output.write(\"redirecting: qid = %s
\\n\" % (url, newqid))\n\n\ndef query_to_where_tuple(query, fields, more_where=None):\n l = []\n for x in fields:\n if x in query:\n v = query[x]\n l.append((x, v))\n\n return pdk_db.where_dict(l, more_where=more_where)\n\n\ndef collect_prefixes(query):\n test_name = query['test_name']\n have_qid = 'qid' in query\n if have_qid:\n qid = int(query['qid'])\n more_where = 'query.qid = %d AND query.key_id = result_scalar.key_id' % qid\n else:\n more_where = None\n where_text, where_dict = query_to_where_tuple(query, ('test_name', 'test_run',\n 'project', 'host', 'context',\n 'status', 'attn'), more_where)\n if not have_qid:\n c = pdk_db.execute('SELECT DISTINCT test_name FROM result_scalar %s GROUP BY test_name ORDER BY test_name' % where_text, where_dict)\n else:\n sys.stdout.flush()\n c = pdk_db.execute('SELECT DISTINCT test_name FROM result_scalar, query %s GROUP BY test_name ORDER BY test_name' % where_text, where_dict)\n l = len(test_name)\n prev_one = None\n prefixes = []\n for x in c:\n r_test_name, = x\n y = re.search('[/.]', r_test_name[l:])\n if not y:\n y = len(r_test_name[l:])\n this_one = r_test_name[:l + y + 1]\n else:\n y = y.start()\n this_one = r_test_name[:l + y + 1] + '*'\n if this_one != prev_one:\n if prev_one is not None:\n prefixes.append(prev_one)\n prev_one = this_one\n\n if prev_one is not None:\n prefixes.append(prev_one)\n return prefixes\n\n\ndef collect_table(prefixes, query, always_link):\n status = query['status']\n rownum = 0\n table = text_table.text_table()\n table.set_html_table_attributes('border=1')\n table.define_column('test_name')\n table.define_column('count')\n total_col = {}\n count_col = {}\n lquery = copy.copy(query)\n for x in common.cfg.statuses:\n if status == '*' or x in status:\n lquery['status'] = x\n table.define_column(x,\n showname=(common.cfg.status_names[x]),\n link=(common.selflink(lquery, 'treewalk')))\n total_col[x] = 0\n count_col[x] = 0\n\n total_count = 0\n total_row = rownum\n rownum = rownum + 1\n have_qid = 'qid' in query\n if have_qid:\n qid = int(query['qid'])\n more_where = ' qid = %d AND result_scalar.key_id = query.key_id ' % qid\n else:\n more_where = None\n for this_test_name in prefixes:\n lquery['test_name'] = this_test_name\n if '*' in this_test_name:\n linkmode = 'treewalk'\n else:\n linkmode = 'treewalk.linkout'\n lquery['status'] = status\n table.set_value(rownum,\n 'test_name',\n text=this_test_name,\n link=(common.selflink(lquery, linkmode)))\n table.set_html_cell_attributes(rownum, 'test_name', 'align=\"left\"')\n count = 0\n for x in common.cfg.statuses:\n if status == '*' or x in status:\n lquery['status'] = x\n where_text, where_dict = query_to_where_tuple(lquery, ('test_name',\n 'test_run',\n 'project',\n 'host', 'context',\n 'status',\n 'attn'), more_where)\n if not have_qid:\n ss = ''\n else:\n ss = ', query'\n c = pdk_db.execute('SELECT count(*) FROM result_scalar%s %s' % (\n ss, where_text), where_dict)\n datum = c.fetchone()\n if datum is None:\n count_col[x] = 0\n else:\n count_col[x], = datum\n lquery['status'] = x\n if not always_link:\n if count_col[x] == 0:\n table.set_value(rownum, x, text='0')\n else:\n table.set_value(rownum,\n x,\n text=(count_col[x]),\n link=(common.selflink(lquery, linkmode)))\n table.set_html_cell_attributes(rownum, x, 'align=\"right\"')\n count = count + count_col[x]\n total_count = total_count + count_col[x]\n\n table.set_value(rownum, 'count', text=count)\n table.set_html_cell_attributes(rownum, 'count', 'align=\"right\"')\n for x in total_col:\n total_col[x] += 
count_col[x]\n\n rownum = rownum + 1\n\n table.set_value(total_row, 'count', text=total_count)\n table.set_html_cell_attributes(total_row, 'count', 'align=\"right\" style=\"font-weight:bold\"')\n for x in common.cfg.statuses:\n if status == '*' or x in status:\n table.set_value(total_row, x, text=(total_col[x]))\n table.set_html_cell_attributes(total_row, x, 'align=\"right\" style=\"font-weight:bold\"')\n\n return table\n\n\ndef cmp_form(query, comparing):\n lquery = query.copy()\n del lquery['compare']\n lquery['cmp_test_run'] = lquery.get('cmp_test_run', common.run_previous(None, lquery['test_run']))\n lquery['cmp_context'] = lquery.get('cmp_context', lquery['context'])\n lquery['cmp_host'] = lquery.get('cmp_host', lquery['host'])\n lquery['query'] = 'treewalk'\n l = [\n '[ Compare]\\n
\\n&nbsp;&nbsp;&nbsp; \\n ',\n        '&nbsp;&nbsp;&nbsp;' % common.get_cgi_name(),\n        '']\n    for x in ('cmp_test_run', 'cmp_context', 'cmp_host'):\n        l.append(\"\" % (\n            x, x, lquery[x]))\n        del lquery[x]\n\n    l.append('&nbsp;&nbsp;&nbsp;%s&nbsp;&nbsp;&nbsp;')\n    l.append(common.query_dict_to_hidden(lquery))\n    l.append(\" \")\n    l.append(\" \")\n    l.append(\" \")\n    l.append('&nbsp;&nbsp;&nbsp;')\n    l.append('\\n\\n
\\n \\n ')\n return '\\n'.join(l)","sub_path":"pycfiles/pandokia-2.3.0.tar/pcgi_treewalk.cpython-36.py","file_name":"pcgi_treewalk.cpython-36.py","file_ext":"py","file_size_in_byte":18487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85423939","text":"# -*- coding: utf-8 -*-\nimport pygame\nimport sys\nimport os\nimport random\n\n'''\nTutorial Code from here: https://opensource.com/article/17/12/game-framework-python\n'''\n\n\n\n'''\nObjects - put Python classes and functions here\n'''\nclass Snake(pygame.sprite.Sprite):\n '''\n Spawn a player\n '''\n\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.images = []\n\n img = pygame.image.load(os.path.join('images', 'hero.png')).convert()\n self.images.append(img)\n self.image = self.images[0]\n self.rect = self.image.get_rect()\n\n self.movex = 0 # move along X\n self.movey = 0 # move along Y\n self.frame = 0 # count frames\n\n def control(self, x, y):\n self.movex += x\n self.movey += y\n\n def update(self):\n self.rect.x = self.rect.x + self.movex\n self.rect.y = self.rect.y + self.movey\n\n\n\n\n'''\nSetup - put run-once code here\n'''\ndead = True\n\nworldx = 600\nworldy = 428\n\nfps = 40 # frame rate\nani = 4 # animation cycles\nclock = pygame.time.Clock()\npygame.init()\n\nworld = pygame.display.set_mode([worldx, worldy])\nbackdrop = pygame.image.load(os.path.join('images', 'snake_background.png'))\nbackdropbox = world.get_rect()\n\nsnake = Snake()\nsnake.rect.x = 0\nsnake.rect.y = 0\nplayer_list = pygame.sprite.Group()\nplayer_list.add(snake)\nsteps = 10\n\n'''\nMain loop - put game loop here\n'''\nwhile dead == True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit();\n sys.exit()\n dead = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n snake.control(-steps, 0)\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n snake.control(steps, 0)\n\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == ord('a'):\n snake.control(steps, 0)\n if event.key == pygame.K_RIGHT or event.key == ord('d'):\n snake.control(-steps, 0)\n if event.key == ord('q'):\n pygame.quit()\n sys.exit()\n dead = False\n\n snake.update() # update player position\n world.blit(backdrop, backdropbox)\n player_list.draw(world)\n pygame.display.flip()\n clock.tick(fps)\n\n\n","sub_path":"Tutorial1.py","file_name":"Tutorial1.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67030785","text":"import re\n\nfrom relation import relation, relationData\nfrom supportedRelations import listeRelation, grammar, dic\n\nUNIVERSAL = re.compile(r'.*')\nLIVE_IN = re.compile(r'.*(live|lives|inhabit|inhabits|are|is|living)+.*\\bin\\b(?!\\b.+ing)')\nTHERE = re.compile(r'.*((There.*are)|(there.*are)|(There.*is)|(there.*is))+\\b(?!.+than)')\nTHERE_MORE = re.compile(r'.*((There.*are)|(there.*are)|(There.*is)|(there.*is)).*(((more|greater).*than)|(superior.*to))')\nTHERE_LESS = re.compile(r'.*((There.*are)|(there.*are)|(There.*is)|(there.*is)).*(((less|shorter).*than)|(inferior.*to))')\nIN = re.compile(r'.*\\bin\\b(?!\\b.+ing)')\n\nnumber_dic = {\n 'billion': '000000000',\n 'million': '000000',\n 'thousand': '000',\n 'hundread': '00'\n}\n\nhaspop_grammar = \"CDD: {*} \\nPPCD: {}\"\n\nhaspop_dic = {\n\t\"people\": \"PPUNIT\",\n\t\"inhabitants\": \"PPUNIT\"\n}\n\ndef replace_int(x):\n try:\n int(x)\n return 
x\n except ValueError:\n if(x in number_dic.keys()):\n return number_dic[x]\n else:\n return \"\";\n\ndef make_nice(text):\n text = [replace_int(x) for x in text.split('_')]\n return(\"\".join(text))\n\nhasPopulation = relation('hasPopulation' , 'PPCD' , 'GPE' , make_nice , patterns_list=[\n {'left': UNIVERSAL, 'middle': LIVE_IN, 'comparator': 'egal'},\n {'left': THERE, 'middle': IN, 'comparator': 'egal'},\n {'left': THERE_MORE, 'middle': IN, 'comparator': 'more'},\n {'left': THERE_LESS, 'middle': IN, 'comparator': 'less'}\n])\n\nlisteRelation.append(hasPopulation)\ndic.update(haspop_dic)\ngrammar.append(haspop_grammar)\n","sub_path":"client/supportedRelations/hasPopulation.py","file_name":"hasPopulation.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553102344","text":"#Populate the database with default values...\nif db(db.company).count() == 0:\n db.company.insert(\n name='Python Constructor',\n info='This is a great company...'\n)\ncompany = db(db.company.name == 'Python Constructor').select().first()\nif company:\n if db(db.employee.employer == company).count() == 0:\n db.employee.insert(\n employer = company,\n name='espern',\n info='nice guy!'\n )","sub_path":"models/x_fixtures.py","file_name":"x_fixtures.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"452729215","text":"# -*- coding:utf-8 -*-\nimport logging\nclass MyLogger(object):\n\t\"\"\"\n\t# 自定义log输出器\n\t# 单例模式\n\t\"\"\"\n\t_instance = None\n\t_log_obj = None\n\t_log_path = None\n\n\tmlog = None\n\tdef __new__(cls, *args, **kw):\n\t\tif cls._instance is None:\n\t\t\tcls._instance = object.__new__(cls, *args, **kw)\n\t\tcls._log_obj = args[0]\n\t\tcls._log_path = args[1]\n\t\treturn cls._instance\n\n\tdef __init__(self, log_obj, log_path=\"/tmp\"):\n\t\tif self.mlog is None:\n\t\t\tself.mlog = self.my_logger()\n\t\tpass\n\n\tdef my_logger(self):\n\t\t\"\"\"\n\t\t# 自定义log显示及输出\n\t\t:param log_obj: object name who exec logger\n\t\t:param log_path: log output filepath\n\t\t:return: logger obj\n\t\t\"\"\"\n\t\tlogger = logging.getLogger(self._log_obj)\n\t\t# print(logger.handlers)\n\t\tlogger.setLevel(logging.INFO)\n\n\t\tconsole_handle = logging.StreamHandler()\n\t\tfile_handle = logging.FileHandler(filename=self._log_path + \"/\" +self._log_obj + \".log\")\n\t\tformatter = logging.Formatter('%(asctime)s - %(name)s:%(funcName)s - %(levelname)s - %(message)s ')\n\t\tconsole_handle.setFormatter(formatter)\n\t\tfile_handle.setFormatter(formatter)\n\t\tlogger.addHandler(console_handle)\n\t\tlogger.addHandler(file_handle)\n\n\t\treturn logger\n\n\nif __name__ == \"__main__\":\n\n\twhile True:\n\t\t# MyLogger 是单例模式 实现,否则每次新创建自定义logging一次都会导致log输出重复度加1\n\t\tmlog = MyLogger(\"test\", \"/tmp\").mlog\n\t\tmlog.info(\"helloworld\")\n\t\tmlog.info(\"testing log\")\n\t\timport time\n\t\ttime.sleep(3)","sub_path":"Python/Pexpect/pexpect_demo/MyLogger.py","file_name":"MyLogger.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314807832","text":"'''\r\n1 方程组的矩阵形式\r\nPE=A*E+B*U (1-1)\r\n\r\n2 步骤:\r\n2.1 首先定义并且进行初始化\r\nE = [ Edp , Eqp , Edpp ,Eqpp , Efd1 ,Efd2 ,Efd3 ,Efd4]\r\n Efd1 ,Efd2 ,Efd3 ,Efd4为由于励磁PID导致的状态变量 \r\n\r\n\r\n2.2 计算流程\r\n(1)对E,U初始化\r\n(2)对式(1-1)调用龙格库塔计算E,所有的变量都是E或者E中的量,这样才能进行龙格库塔计算,因为只有E一个变量\r\n(3)广科的控制模型里面cos=K(uref-ug)/ug 
里面的量并未涉及励磁系统的定标,串联传递函数也未涉及\r\n励磁系统的定标,那么也就是说厂家用什么定标实际上对仿真没有影响。仿真实际上做的是仿真的框架。\r\n'''\r\n\r\nimport os \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 步骤一(替换sans-serif字体)\r\nplt.rcParams['axes.unicode_minus'] = False # 步骤二(解决坐标轴负数的负号显示问题)\r\n\r\n\r\nclass Model():\r\n def __init__(self):\r\n super().__init__()\r\n self.DefaultParametersetting()\r\n\r\n def DefaultParametersetting(self):\r\n self.dt = 0.01\r\n self.t = 0.0\r\n self.y = 3.0\r\n self.tseq = [0.0]\r\n self.yseq = [3.0]\r\n self.tseq_original = np.arange(0, 1, 0.01)\r\n self.yseq_original = self.tseq_original**3 - self.tseq_original**2 + 3\r\n########################################################################################\r\n #控制参数 皂市1数据\r\n self.Tr = 0.01\r\n self.TA = 0.01\r\n self.K = 22*7.84\r\n self.Kv = 1.0\r\n self.T1 = 1.0\r\n self.T2 = 4.0\r\n self.T3 = 1.0\r\n self.T4 = 1.0\r\n #发电机参数 皂市1数据\r\n self.Tq0p = 0.001\r\n self.Td0p = 9.45\r\n self.Tq0pp = 0.198 #0.198\r\n self.Td0pp = 0.091\r\n self.a = 1.0\r\n self.b = 0.192\r\n self.n = 6.246\r\n # #控制参数 湘潭3数据\r\n # self.Tr = 0.02\r\n # self.TA = 0.01\r\n # self.K = 500\r\n # self.T1 = 1.0\r\n # self.T2 = 8.33\r\n # self.T3 = 1.0\r\n # self.T4 = 1.0\r\n # #发电机参数 湘潭3数据\r\n # self.Tq0p = 0.95\r\n # self.Td0p = 9.32\r\n # self.Tq0pp = 0.069 \r\n # self.Td0pp = 0.045\r\n # self.a = 1.0\r\n # self.b = 0.186\r\n # self.n = 8.357\r\n#########################################################################################\r\n #需求变量定义及初始化\r\n self.ud0 = 0\r\n self.uq0 = 0.9514\r\n self.Edp0 = 0\r\n self.Eqp0 = self.uq0\r\n self.Edpp0 = 0\r\n self.Eqpp0 = self.uq0\r\n self.KG0 = 1 + self.b / self.a * self.Eqpp0**(self.n - 1)\r\n self.Efd0 = self.uq0 * self.KG0\r\n self.uref0 = self.Efd0/self.K+self.uq0 #关键\r\n self.deltaU = 0.0485\r\n self.Tstart = 1\r\n self.Tend = 8\r\n self.Tstepdelay = 0.01 #不可为0,否则将除以0 \r\n self.Efd10 = self.uref0-self.uq0\r\n self.Efd20 = self.Efd0\r\n self.Efd30 = self.Efd0\r\n self.Efd40 = self.Efd0\r\n \r\n self.dtvector = np.array([0.01]*8).reshape(-1,1)\r\n self.tvector = np.array([0]*8).reshape(-1,1)\r\n self.tmatrix = np.array([0]*8).reshape(-1,1)\r\n\r\n self.Evector = np.array([self.Edp0, self.Eqp0, self.Edpp0,self.Eqpp0,self.Efd10,self.Efd20,self.Efd30,self.Efd40]).reshape(-1, 1) #[Edp,Eqp,Edpp,Eqpp]\r\n self.Ematrix = self.Evector\r\n\r\n self.pEvector = np.array([0]*8).reshape(-1, 1) #-1表示我懒得计算该填什么数字,由python通过原数组和其他的值3推测出来。\r\n self.pEmatrix = self.pEvector\r\n\r\n self.A = np.array([[-1/self.Tq0p , 0,0,0,0,0,0,0],\\\r\n [0 , -(1 + self.b / self.a * float(self.Evector[3])**(self.n - 1))/self.Td0p,0,0,0,0,0,1/self.Td0p],\\\r\n [(1/self.Tq0pp-1/self.Tq0p),0,-1/self.Tq0pp ,0, 0,0,0,0],\\\r\n [0 , (1/self.Td0pp-(1 + self.b / self.a * float(self.Evector[3])**(self.n - 1))/self.Td0p),0,-1/self.Td0pp,0,0,0,1/self.Td0p],\\\r\n [0,0,0,0,-1/self.Tr,0 ,0,0],\\\r\n [0,0,0,0,self.K/self.TA,-1/self.TA ,0,0],\\\r\n [0,0,0,0,self.T1/self.T2*self.K/self.TA,1/self.T2-self.T1/(self.T2*self.TA),-self.Kv/self.T2,0],\\\r\n [0,0,0,0,self.T1/self.T2*self.T3/self.T4*self.K/self.TA ,self.T3/self.T4*(1/self.T2-self.T1/(self.T2*self.TA)),(1/self.T4-self.Kv*self.T3/(self.T4*self.T2)),-1/self.T4]\r\n ])\r\n self.B = np.array([0,0,0,0,1,0,0,0]).reshape(-1, 1)\r\n\r\n print(\"A = \\n\",self.A)\r\n print(\"B = \\n\",self.B)\r\n print(\"E = \\n\",self.Ematrix)\r\n\r\n def rk4(self, y, f, dt, t):\r\n\r\n k1 = f(t, y)\r\n k2 = f(t + dt / 2, y + dt / 2 * k1)\r\n k3 = f(t + dt / 2, y + dt / 2 * 
k2)\r\n k4 = f(t + dt, y + dt * k3)\r\n ynext = y + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)\r\n return ynext\r\n\r\n def uref_fun(self,t):\r\n\r\n if t< self.Tstart :\r\n temp = self.uref0\r\n elif t CxHxW, [28x28] -> [1x28x28], [0,255] -> [0,1]\n transforms.ToTensor(),\n # mean=(0.5,0.5,0.5) and std. It is for 3 channels\n #mnist only has one channel\n transforms.Normalize(mean=(0.5,), std=(0.5,))\n ])\npath = '../../__SSSSShare_DDDate_set/mnist_data/'\ntrain_dataset = datasets.MNIST(root=path, train=True,\n transform=transform,\n download=True)\ntest_dataset = datasets.MNIST(root=path, train=False,\n transform=transform,\n download=False)\ndef normal_init(m, mean, std):\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):\n m.weight.data.normal_(mean, std)\n m.bias.data.zero_()\n\n# Formula: O=(I-1)xS - 2P + K\n# -----------------------\n# l1: I=1, O=3, K=3, S=1\n# l2: I=3, O=5, K=3, S=1\n# l3: I=5, O=12, K=4, S=2\n# l4: I=12, O=26, K=4, S=2\n# l5: I=26, O=28, K=3, S=1\n# -----------------------\n# l1: I=1, O=3, K=3, S=2\n# l2: I=3, O=7, K=3, S=2\n# l3: I=7, O=13,K=3, S=2, P=1\n# l4: I=13,O=28,K=4, S=2\n# ------------------------\n# l1: I=1, O=4, K=4, S=2\n# l2: I=4, O=9, K=3, S=2\n# l3: I=9, O=28,K=4, S=3\nclass generator(nn.Module):\n # initializers\n def __init__(self, d=128):\n super(generator, self).__init__()\n self.deconv1 = nn.ConvTranspose2d(100, d * 8, 3, 1)\n self.deconv1_bn = nn.BatchNorm2d(d * 8)\n self.deconv2 = nn.ConvTranspose2d(d * 8, d * 4, 3, 1)\n self.deconv2_bn = nn.BatchNorm2d(d * 4)\n self.deconv3 = nn.ConvTranspose2d(d * 4, d * 2, 4, 2)\n self.deconv3_bn = nn.BatchNorm2d(d * 2)\n self.deconv4 = nn.ConvTranspose2d(d * 2, d, 4, 2)\n self.deconv4_bn = nn.BatchNorm2d(d)\n self.deconv5 = nn.ConvTranspose2d(d, 1, 3, 1)\n\n # weight_init\n def weight_init(self, mean, std):\n for m in self._modules:\n normal_init(self._modules[m], mean, std)\n\n # forward method\n def forward(self, input):\n # x = F.relu(self.deconv1(input))\n x = F.relu(self.deconv1_bn(self.deconv1(input)))\n x = F.relu(self.deconv2_bn(self.deconv2(x)))\n x = F.relu(self.deconv3_bn(self.deconv3(x)))\n x = F.relu(self.deconv4_bn(self.deconv4(x)))\n x = torch.tanh(self.deconv5(x))\n\n return x\n\nclass discriminator(nn.Module):\n # initializers\n def __init__(self, d=128):\n super(discriminator, self).__init__()\n self.conv1 = nn.Conv2d(1, d, 3, 1)\n self.conv2 = nn.Conv2d(d, d*2, 4, 2)\n self.conv2_bn = nn.BatchNorm2d(d*2)\n self.conv3 = nn.Conv2d(d*2, d*4, 4, 2)\n self.conv3_bn = nn.BatchNorm2d(d*4)\n self.conv4 = nn.Conv2d(d*4, d*8, 3, 1)\n self.conv4_bn = nn.BatchNorm2d(d*8)\n self.conv5 = nn.Conv2d(d*8, 1, 3, 1)\n\n # weight_init\n def weight_init(self, mean, std):\n for m in self._modules:\n normal_init(self._modules[m], mean, std)\n\n # forward method\n def forward(self, input):\n x = F.leaky_relu(self.conv1(input), 0.2)\n x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)\n x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)\n x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)\n x = torch.sigmoid(self.conv5(x))\n x = x.squeeze()\n return x\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# device = torch.device('cpu')\n\n# hyper parameter\nbatch_size = 100\nn_epoch = 200\nlr = 0.0002\n\n# Data Loader\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n\n# build network\n# 
z_dim = 100\nG = generator(64)\nD = discriminator(64)\n\nG.weight_init(mean=0.0, std=0.02)\nD.weight_init(mean=0.0, std=0.02)\nG.to(device)\nD.to(device)\n\n# optimizer\n# G_optimizer = optim.Adam(G.parameters(), lr = lr)\n# D_optimizer = optim.Adam(D.parameters(), lr = lr)\nG_optimizer = optim.Adam(G.parameters(), lr=lr, betas=(0.5, 0.999))\nD_optimizer = optim.Adam(D.parameters(), lr=lr, betas=(0.5, 0.999))\n\nDL = []\nGL = []\nshow_size = 10\nfor epoch in range(n_epoch):\n D_losses = []\n G_losses = []\n for x, _ in train_loader:\n x = x.to(device)\n z = torch.randn((batch_size, 100)).view(-1, 100, 1, 1)\n z = z.to(device)\n x_fake = G(z)\n D_loss = torch.mean(torch.log(D(x)) + torch.log(1-D(x_fake)))\n D_loss = D_loss * (-1)\n # D.zero_grad()\n D_optimizer.zero_grad()\n D_loss.backward()\n D_optimizer.step()\n D_losses.append(D_loss.item())\n\n z = torch.randn((batch_size, 100)).view(-1, 100, 1, 1)\n z = z.to(device)\n x_fake = G(z)\n G_loss = torch.mean(torch.log(D(x_fake)))\n G_loss = G_loss * (-1)\n # G.zero_grad()\n G_optimizer.zero_grad()\n G_loss.backward()\n G_optimizer.step()\n G_losses.append(G_loss.item())\n\n # break\n DL.append(np.mean(D_losses))\n GL.append(np.mean(G_losses))\n\n\n if epoch % 10 == 0:\n if batch_size < show_size:\n show_size = batch_size\n imgs = x_fake[:show_size]\n imgs = imgs.cpu().detach().numpy()\n imgs = np.reshape(imgs, (-1, 28, 28))\n plt.gray()\n fig, axs = plt.subplots(1, show_size,\n gridspec_kw={'hspace': 0, 'wspace': 0})\n axs[0].set_title('Epoch:'+ str(epoch))\n for n in range(show_size):\n axs[n].imshow(imgs[n])\n axs[n].axis('off')\n fig.set_size_inches(np.array(fig.get_size_inches()) * show_size* 0.25)\n plt.show()\n\n # break\n# plt.plot(range(n_epoch), D_losses)\nplt.plot(range(len(DL)), DL, 'r')\nplt.plot(range(len(GL)), GL, 'b')\nplt.legend(('Discriminator', 'Generator'),\n loc='upper right')\nplt.xlabel('Epoch')\nplt.ylabel('Loss Value')\nplt.show()\n","sub_path":"GAN/sandbox/gan_v0002.py","file_name":"gan_v0002.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471739956","text":"## This file contains codes for the prediction of incidence rates from Type 2 diabetes using xgboost.\n## We predict incidence rates within 5 years for the baseline cohort.\n## Last modified in 2019.11.04 by Danbi Chung\n\n## Import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score, cohen_kappa_score, roc_auc_score, roc_curve\nimport scipy.stats as st\nfrom xgboost.sklearn import XGBClassifier \nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\nfrom xgboost import plot_importance\nfrom sklearn.externals import joblib\n\n\n########################################################################\n### Data preprocessing\n########################################################################\n\n## Read data\ndata=pd.read_stata('D:/Dropbox/2019-보험개발원-질병연구/분석결과/baseline/t2dm_baseline.dta')\n# Convert all field names to lower case. 
\ndata.columns = map(str.lower, data.columns)\n# person_id as index\ndata.set_index('person_id',inplace=True)\n#data.shape\n#data.info()\n#data.head()\n\n## Trim data\n# convert value labels yes/no to 1/0\nv_encode = 'fmly_liver','fmly_hprts','fmly_apop','fmly_hdise','fmly_diabml','fmly_cancer'\nfor i in v_encode:\n data[i] = data[i].apply(lambda x: '1' if x=='yes' else '0')\n\n \n# Extract the columns we need\ndata = data[['sex','age_group','sido','ipsn_type_cd','ctrb_pt_type_cd','dfab_grd_cd',\n 'asthma','af','bc','cc','chf','ckd','gc','copd','dementia','depression','fracture',\n 'hyperten','hc','hyperlip','lc','mi','pneumonia','ra','stroke',\n 'height','weight','bp_high','bp_lwst','blds','tot_chole','hmg','gly_cd','olig_occu_cd','olig_ph','olig_prote_cd',\n 'sgot_ast','sgpt_alt','gamma_gtp','fmly_liver','fmly_hprts','fmly_apop','fmly_hdise','fmly_diabml','fmly_cancer',\n 'exerci_freq','waist','bmi','smk_dum','drnk_dum',\n 't2dm_within1','t2dm_within2','t2dm_within3','t2dm_within4','t2dm_within5']]\n#data.shape\n\n## Imputing NaN values\n#data.isnull().sum()\nv_mean= ['height','weight','bp_high','bp_lwst','blds','tot_chole','hmg','olig_ph','sgot_ast','sgpt_alt','gamma_gtp','waist','bmi']\nv_mode= ['olig_prote_cd','gly_cd','olig_occu_cd','fmly_liver','fmly_hprts','fmly_apop','fmly_hdise','fmly_diabml','fmly_cancer',\n 'exerci_freq','drnk_dum']\nfor i in v_mean:\n data[i].fillna(data[i].mean(), inplace=True)\nfor i in v_mode:\n data[i].fillna(data[i].mode()[0], inplace=True)\n\n## Data type conversion\nv_cat= ['af','age_group','asthma','bc','gc','cc','chf','ckd','copd','ctrb_pt_type_cd','dementia','depression','dfab_grd_cd',\n 'drnk_dum','exerci_freq','fracture','hyperten', 'gly_cd','hc','hyperlip','ipsn_type_cd','lc','mi',\n 'olig_occu_cd','olig_prote_cd','pneumonia','ra','sex','sido','smk_dum','stroke',\n 't2dm_within1','t2dm_within2','t2dm_within3','t2dm_within4','t2dm_within5']\nfor i in v_cat:\n data[i]=data[i].astype('category')\n\n## Extract the columns for model training\ndf = data[data.columns[:-5]]\n\n# Categorical variables\ndf_cat=df.select_dtypes(['category'])\n# do one-hot encodings\ndf_cat_ohe = pd.get_dummies(df_cat)\n\n# Non-categorical variables\ndf_num=df.select_dtypes(exclude=['category'])\n# standardize non-categorical variables\ndf_num1 = df_num.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))\ndf_num1.describe(include='all')\ndf_num2 = df_num.apply(lambda x: (x - np.mean(x)) / (np.std(x)))\ndf_num2.describe(include='all')\n\n## Split train and test data\nX = pd.concat([df_num2, df_cat_ohe], axis=1, join='inner')\ny= data[\"t2dm_within5\"]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=123)\n\n\n\n########################################################################\n### XGBoost using Randomized Search\n########################################################################\n\n## Parameter tunning\n# For faster computation, we use 30% of the training data for hyperparameter tunning\nZ_train, Z_test, zy_train, zy_test = train_test_split(X_train, y_train, test_size=0.3, random_state=123)\n\none_to_left = st.beta(10, 1) \nfrom_zero_positive = st.expon(0, 50)\n\nparams = { \n \"n_estimators\": st.randint(100, 1000),\n \"max_depth\": st.randint(3, 10),\n \"learning_rate\": st.uniform(0.01, 0.4),\n \"colsample_bytree\": one_to_left,\n \"subsample\": one_to_left,\n \"gamma\": st.uniform(0, 10),\n 'reg_alpha': from_zero_positive,\n 'min_child_weight': from_zero_positive,\n 'scale_pos_weight':st.randint(2, 300),\n 
\n}\n\nxgbcls = XGBClassifier(nthreads=-1)\n\ngs0 = RandomizedSearchCV(xgbcls, params, scoring = 'f1', n_iter =100, verbose = 30, n_jobs=-1) \n# We also tried scoring = 'f1_macro', 'neg_log_loss', 'balanced_accuracy', and 'roc_auc.' \n\ngs0.fit(Z_train, zy_train) \ngs0.best_estimator_.get_params()\n\nparams0 = gs0.best_estimator_.get_params()\nnlist= ['base_score', 'booster', 'colsample_bylevel', 'colsample_bynode', 'colsample_bytree', 'gamma', \n 'learning_rate', 'max_delta_step', 'max_depth', 'min_child_weight', 'missing', 'n_estimators', \n 'n_jobs', 'objective', 'random_state', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', \n 'seed', 'silent', 'subsample','verbosity']\nfor v in nlist:\n globals()['%s0' %v] = params0[v] \n\n\nxgb0 = XGBClassifier(\n colsample_bylevel = colsample_bylevel0, \n colsample_bytree = colsample_bytree0, \n gamma = gamma0, \n learning_rate = learning_rate0, \n max_depth = max_depth0, \n min_child_weight = min_child_weight0, \n n_estimators = n_estimators0, \n reg_alpha = reg_alpha0, \n reg_lambda = reg_lambda0, \n scale_pos_weight = scale_pos_weight0,\n seed = seed0, \n subsample = subsample0, \n eval_metric = \"auc\", \n nthread=-1)\n\n# Save the model\njoblib.dump(xgb0, 'D:/Dropbox/2019-보험개발원-질병연구/분석결과/Python_code/xgb_model/xgb_t2dm_5yr.sav')\n\n\n## Fit the model\nxclas0 = xgb0.fit(X_train, y_train)\nprint(xclas0)\n\ny_pred0= xclas0.predict(X_test)\n\n\n## Plot the feature importance\nplot_importance(xgb0, max_num_features=20, importance_type='gain', grid=True)\nplt.show()\nxgb0.get_booster().get_score(importance_type='gain')\n","sub_path":"xgboost_t2dm_v1.py","file_name":"xgboost_t2dm_v1.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"375851260","text":"\"\"\"\nTest NSoT utilities.\n\"\"\"\n\nfrom nsot.util import SetQuery, parse_set_query\n\n\ndef test_parse_set_query():\n \"\"\"\n Parse a bunch of set queries and make sure that the expected result\n matches.\n \"\"\"\n # List of 2-tuples of (query, expected_result)\n set_tests = {\n 'foo=bar': [\n ('intersection', 'foo', 'bar'),\n ],\n 'foo=bar owner=jathan': [\n ('intersection', 'foo', 'bar'),\n ('intersection', 'owner', 'jathan'),\n ],\n '-owner=gary': [\n ('difference', 'owner', 'gary'),\n ],\n 'cluster +foo=baz': [\n ('intersection', 'cluster', ''),\n ('union', 'foo', 'baz'),\n ],\n # Extra white space\n 'cluster=lax +foo=baz': [\n ('intersection', 'cluster', 'lax'),\n ('union', 'foo', 'baz'),\n ],\n }\n\n # Make sure that result matches expected_result\n for query, expected_result in set_tests.iteritems():\n result = parse_set_query(query)\n assert result == expected_result\n","sub_path":"tests/api_tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123063573","text":"##\n## Programación en Python\n## ===========================================================================\n##\n## Genere una lista de tuplas, donde cada tupla contiene en la primera \n## posicion, el valor de la segunda columna; la segunda parte de la \n## tupla es una lista con las letras (ordenadas y sin repetir letra) \n## de la primera columna que aparecen asociadas a dicho valor de la \n## segunda columna. 
Esto es:\n##\n## Rta/\n## ('0', ['C'])\n## ('1', ['A', 'B', 'D', 'E'])\n## ('2', ['A', 'D', 'E'])\n## ('3', ['A', 'B', 'D', 'E'])\n## ('4', ['B', 'E'])\n## ('5', ['B', 'C', 'D', 'E'])\n## ('6', ['A', 'B', 'C', 'E'])\n## ('7', ['A', 'C', 'D', 'E'])\n## ('8', ['A', 'B', 'E'])\n## ('9', ['A', 'B', 'C', 'E'])\n##\n## >>> Escriba su codigo a partir de este punto <<<\n##\nDatos = open('data.csv','r').readlines()\nDatos = [l.split('\\t') for l in Datos]\nD2 = [a[1] for a in Datos]\nOD2 = sorted(set(D2))\n\nfor i in OD2:\n p = []\n for a in Datos:\n if a[1] == i:\n p.append(a[0])\n l = sorted(list(set(p)))\n print((i, l))","sub_path":"03-python=1/q08=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"148519711","text":"\"\"\"Counting letters in a string.\"\"\"\n\n__author__ = \"730240245\"\n\n\n# Begin your solution here...\ni: int = 0 \nsearch: str = input(\"What letter do you want to search for?: \")\nword: str = input(\"Enter a word: \")\nwhile i < len(word):\n how_many: int = word.count(search)\n print(\"Count: \" + str(how_many))\n i = len(word)","sub_path":"exercises/ex02/count_letters.py","file_name":"count_letters.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"174003834","text":"from PVector import PVector\nfrom PMatrix import PMatrix\nfrom PNode import PNode\nfrom Parser import Parser \nfrom PElement import PElement\nfrom PMaterial import PMaterial\nfrom PBoundary import PBoundary\nfrom PNodalload import PNodalload\nfrom PSection import *\nfrom PNodalResponse import PNodalResponse\nfrom PElementResponse import PElementResponse\nfrom PWriteHTML import PWriteHTML\n\n# Handles input of the FE Analysis Program\nclass FileIO:\n def ReadFile(self,ifile): \n#Initialising the parser with the input file\n self.inputfile = Parser(ifile)\n#Reading nodes of the structure\n nodes = self.inputfile.read(\"Nodes\")\n self.Nodaldata = PVector(len(nodes),PNode)\n C = PVector(3)\n for i in range(1,len(nodes)+1):\n ID,C[1],C[2],C[3] = nodes[i-1].split()\n self.Nodaldata[i].SetValues(C)\n self.Nodaldata[i].SetNodeNo(ID)\n#Reading elements of the structure\n elements = self.inputfile.read(\"Elements\")\n self.Elementdata = PVector(len(elements),PElement)\n E = PVector(2)\n for i in range(1,len(elements)+1):\n ID,E[1],E[2],sec,mat,etype = elements[i-1].split()\n self.Elementdata[i].SetValues(E,sec,mat,etype)\n self.Elementdata[i].SetElementNo(ID)\n\n#Reading material property\n materials = self.inputfile.read(\"Material\")\n self.Materialdata = PVector(len(materials),PMaterial)\n for i in range(1,len(materials)+1):\n ID,E,P,T = materials[i-1].split()\n ID = int(ID)\n E = float(E)\n P = float(P)\n T = float(T)\n self.Materialdata[i].SetValues(E,P,T)\n self.Materialdata[i].SetMaterialNo(ID)\n \n#Reading cross-sectional property\n sections = self.inputfile.read(\"Section Property\")\n self.Sectiondata = PVector(len(sections),PSection)\n for i in range(1,len(sections)+1):\n if (sections[i-1].split()[1].lower() == \"rectangle\"):\n rec = PVector(2)\n ID,Type,rec[1],rec[2] = sections[i-1].split()\n rec[1]= float(rec[1])\n rec[2]= float(rec[2])\n self.Sectiondata[i] = PRectangle(ID,rec)\n elif (sections[i-1].split()[1].lower() == \"circle\"):\n circ = PVector(1)\n ID,Type,circ[1] = sections[i-1].split()\n circ[1] = float(circ[1])\n self.Sectiondata[i] = PCircle(ID,circ)\n elif 
(sections[i-1].split()[1].lower() == \"isection\"):\n isec = PVector(4)\n ID,Type,isec[1], isec[2], isec[3], isec[4] = sections[i-1].split()\n isec[1] = float(isec[1])\n isec[2] = float(isec[2])\n isec[3] = float(isec[3])\n isec[4] = float(isec[4])\n self.Sectiondata[i] = PISection(ID,isec)\n\n#Reading support conditions\n boundary = self.inputfile.read(\"Boundary Conditions\")\n self.Boundarydata = PVector(len(boundary),PBoundary)\n for i in range(1,len(boundary)+1):\n b = PVector(6)\n node, b[1], b[2], b[3], b[4], b[5], b[6] = boundary[i-1].split()\n node = int(node)\n b[1] = int(b[1])\n b[2] = int(b[2])\n b[3] = int(b[3])\n b[4] = int(b[4])\n b[5] = int(b[5])\n b[6] = int(b[6])\n\n self.Boundarydata[i] = PBoundary(node,b)\n \n#Reading loads\n#-------------Nodal loads\n nload = self.inputfile.read(\"Nodal loads\")\n self.Nodalloaddata = PVector(len(nload),PNodalload)\n for i in range(1,len(nload)+1):\n force = PVector(6)\n node,force[1],force[2],force[3],force[4],force[5],force[6] = nload[i-1].split()\n node = int(node)\n force[1] = float(force[1])\n force[2] = float(force[2])\n force[3] = float(force[3])\n force[4] = float(force[4])\n force[5] = float(force[5])\n force[6] = float(force[6])\n \n self.Nodalloaddata[i] = PNodalload(node,force)\n\n\n#Finite Element Response Data\n self.NodalRdata = PVector(len(self.Nodaldata),PNodalResponse)\n self.ElementRdata = PVector(len(self.Elementdata),PElementResponse)\n \n \n\n \"\"\" Not needed for Truss Problem\n\n#-------------Element loads\n eload = self.inputfile.read(\"Element loads\")\n\n#Reading load combinations\n lcomb = self.inputfile.read(\"Load combination\")\n\n del(self.inputfile)\n \"\"\"\n\n def WriteHTMLFile(self,ofile):\n self.outputfile=PWriteHTML(ofile)\n \n nodes = len(self.Nodaldata)\n DOI = 0\n b = PVector(6)\n for i in self.Boundarydata:\n b = i.GetBC()\n if b[1] == 1:\n DOI = DOI - 1\n if b[2] == 1:\n DOI = DOI - 1\n if b[3] == 1:\n DOI = DOI - 1\n \n # Printing the problem size\n line1 = \"Number of nodes = \" + str(len(self.Nodaldata))\n line2 = \"Number of elements = \" + str(len(self.Elementdata))\n line3 = \"Number of DOF = \" + str(nodes*3+DOI)\n text = [line1,line2,line3]\n self.outputfile.Block(text)\n\n # Printing nodal coordinates\n title = \"NODAL COORDINATES\"\n head = PVector(4)\n head[1] = \"Node\"\n head[2] = \"X-Coordinate\"\n head[3] = \"Y-Coordinate\"\n head[4] = \"Z-Coordinate\"\n content = PMatrix(len(self.Nodaldata),4)\n for i in range(1,len(self.Nodaldata)+1):\n content[i,1] = self.Nodaldata[i].GetNodeNo()\n temp = self.Nodaldata[i].GetValues()\n content[i,2] = '{:.2f}'.format(temp[1])\n content[i,3] = '{:.2f}'.format(temp[2])\n content[i,4] = '{:.2f}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n\n #Printing nodal fixities\n title = \"NODAL FIXITIES (1 = FIXED : 0 = FREE)\"\n head = PVector(4)\n head[1] = \"Node\"\n head[2] = \"X-Fixity\"\n head[3] = \"Y-Fixity\"\n head[4] = \"Z-Fixity\"\n content = PMatrix(len(self.Boundarydata),4)\n for i in range(1,len(self.Boundarydata)+1):\n content[i,1] = self.Boundarydata[i].GetBNode()\n temp = self.Boundarydata[i].GetBC()\n content[i,2] = '{:.0f}'.format(temp[1])\n content[i,3] = '{:.0f}'.format(temp[2])\n content[i,4] = '{:.0f}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n\n #Printing nodal forces\n title = \"NODAL FORCES\"\n head = PVector(4)\n head[1] = \"Node\"\n head[2] = \"X-Force\"\n head[3] = \"Y-Force\"\n head[4] = \"Z-Force\"\n content = PMatrix(len(self.Nodalloaddata),4)\n for i in 
range(1,len(self.Nodalloaddata)+1):\n content[i,1] = self.Nodalloaddata[i].GetNode()\n temp = self.Nodalloaddata[i].Getloads()\n content[i,2] = '{:.2f}'.format(temp[1])\n content[i,3] = '{:.2f}'.format(temp[2])\n content[i,4] = '{:.2f}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n \n #Printing element data\n title = \"ELEMENT DATA\"\n head = PVector(5)\n head[1] = \"Element\"\n head[2] = \"SN\"\n head[3] = \"EN\"\n head[4] = \"Section ID\"\n head[5] = \"Material ID\"\n content = PMatrix(len(self.Elementdata),5)\n for i in range(1,len(self.Elementdata)+1):\n content[i,1] = self.Elementdata[i].GetElementNo()\n temp = self.Elementdata[i].GetValues()\n content[i,2] = '{:.0f}'.format(temp[1])\n content[i,3] = '{:.0f}'.format(temp[2])\n content[i,4] = '{:.0f}'.format(temp[3])\n content[i,5] = '{:.0f}'.format(temp[4])\n \n self.outputfile.Table(title,head,content)\n\n #Printing material data\n title = \"MATERIAL DATA\"\n head = PVector(4)\n head[1] = \"MATERIAL ID\"\n head[2] = \"ELASTIC_MOD\"\n head[3] = \"POISSON_RATIO\"\n head[4] = \"THERMAL_COEFF\"\n content = PMatrix(len(self.Materialdata),4)\n for i in range(1,len(self.Materialdata)+1):\n content[i,1] = self.Materialdata[i].GetMaterialNo()\n temp = self.Materialdata[i].GetValues()\n content[i,2] = '{:.1e}'.format(temp[1])\n content[i,3] = '{:.1e}'.format(temp[2])\n content[i,4] = '{:.1e}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n \n #Printing section data\n title = \"SECTION DATA\"\n head = PVector(5)\n temp = PVector(4)\n head[1] = \"SECTION ID\"\n head[2] = \"AREA\"\n head[3] = \"INERTIA_(IXX)\"\n head[4] = \"INERTIA_(IYY)\"\n head[5] = \"INERTIA_(IZZ)\"\n content = PMatrix(len(self.Sectiondata),5)\n for i in range(1,len(self.Sectiondata)+1):\n content[i,1] = self.Sectiondata[i].GetSectionNo()\n temp[1],temp[2],temp[3],temp[4] = self.Sectiondata[i].GetValues()\n content[i,2] = '{:.1e}'.format(temp[1])\n content[i,3] = '{:.1e}'.format(temp[2])\n content[i,4] = '{:.1e}'.format(temp[3])\n content[i,5] = '{:.1e}'.format(temp[4])\n self.outputfile.Table(title,head,content)\n \n #Printing nodal displacement data\n title = \"NODAL DISPLACEMENTS\"\n head = PVector(4)\n temp = PVector(3)\n head[1] = \"NODE\"\n head[2] = \"X-DISP\"\n head[3] = \"Y-DISP\"\n head[4] = \"Z-DISP\"\n content = PMatrix(len(self.NodalRdata),4)\n for i in range(1,len(self.NodalRdata)+1):\n content[i,1] = self.NodalRdata[i].GetNode()\n temp[1],temp[2],temp[3] = self.NodalRdata[i].GetValues()\n content[i,2] = '{:.1e}'.format(temp[1])\n content[i,3] = '{:.1e}'.format(temp[2])\n content[i,4] = '{:.1e}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n\n #Printing element response data\n title = \"ELEMENT RESPONSE\"\n head = PVector(4)\n temp = PVector(3)\n head[1] = \"ELEMENT\"\n head[2] = \"STRAIN\"\n head[3] = \"STRESS\"\n head[4] = \"FORCE\"\n content = PMatrix(len(self.ElementRdata),4)\n for i in range(1,len(self.ElementRdata)+1):\n content[i,1] = self.ElementRdata[i].GetElement()\n temp[1],temp[2],temp[3] = self.ElementRdata[i].GetValues()\n content[i,2] = '{:.1e}'.format(temp[1])\n content[i,3] = '{:.1e}'.format(temp[2])\n content[i,4] = '{:.1e}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n \n #Printing nodal reaction data\n title = \"NODAL REACTIONS\"\n head = PVector(4)\n temp = PVector(3)\n head[1] = \"NODE\"\n head[2] = \"RX\"\n head[3] = \"RY\"\n head[4] = \"RZ\"\n content = PMatrix(len(self.NodalRdata),4)\n for i in range(1,len(self.NodalRdata)+1):\n content[i,1] = self.NodalRdata[i].GetNode()\n 
temp[1],temp[2],temp[3] = self.NodalRdata[i].GetReaction()\n content[i,2] = '{:.1f}'.format(temp[1])\n content[i,3] = '{:.1f}'.format(temp[2])\n content[i,4] = '{:.1f}'.format(temp[3])\n self.outputfile.Table(title,head,content)\n \n\n\n\n self.outputfile.Close()\n \n","sub_path":"Python/FileIO.py","file_name":"FileIO.py","file_ext":"py","file_size_in_byte":11350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"104064128","text":"import argparse\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\nfrom esn import ESN\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-t\", \"--data-type\", type=str, default=\"ig\", dest=\"t\", help=\"data type (mg/dnf/ig)\")\nparser.add_argument(\"-i\", \"--input-file\", type=str, dest=\"i\", help=\"spectral radius\")\nparser.add_argument(\"-s\", \"--spectral-radius\", type=float, default=1.5, dest=\"s\", help=\"spectral radius\")\nparser.add_argument(\"-u\", \"--units\", type=int, default=4096, dest=\"u\", help=\"number of reservoir units\")\nparser.add_argument(\"-tl\", \"--trainlen\", type=int, default=1000, dest=\"trainlen\", help=\"number of reservoir units\")\nparser.add_argument(\"-p\", \"--predlen\", type=int, default=1000, dest=\"predlen\", help=\"number of reservoir units\")\n\nargs = parser.parse_args()\n\nif not args.i:\n print('[ERROR] use -i FILE_NAME to set an input file')\n exit()\n\ndata = np.load(args.i)\nprint('loaded %d points' % len(data))\n\nif args.t == 'dnf':\n data = np.log(data)\nelif args.t == 'ig':\n data = data[0::2]/10000 - 1.1\n\nesn = ESN(n_in=1,\n n_fb=1,\n n_units=args.u,\n spectral_radius=args.s)\n\ntrainlen = args.trainlen\npredlen = args.predlen\nprint('using: %d, predicting: %d' % (trainlen, predlen))\n\nprint('fitting...')\nfit = esn.fit(np.ones(trainlen), data[:trainlen])\nprint('predicting...')\npred = esn.predict(np.ones(predlen), cont=True)\n\nif args.t == 'dnf':\n data = np.exp(data)\n pred = np.exp(pred)\nelif args.t == 'ig':\n data = data+1.1 * 10000\n pred = pred+1.1 * 10000\n\nplt.figure(figsize=(200,30))\nend = trainlen + predlen + 1\ndatalen = min(end, len(data))\nplt.plot(range(0, datalen), data[0:datalen], '--bo', linestyle=\"solid\", linewidth=3, label=\"actual\", alpha=0.5)\nplt.plot(range(trainlen+1, end), pred, '--ro', linestyle=\"solid\", linewidth=3, label=\"prediction\", alpha=0.5)\n\nfn = './%s_%d_%s.png' % (args.t, args.u, str(args.s))\nprint('saving %s' % fn)\nplt.savefig(fn)\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29336000","text":"import ROOT\nimport os\nimport numpy as np\nimport math\nROOT.PyConfig.IgnoreCommandLineOptions = True\n\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\nfrom TauPOG.TauIDSFs.TauIDSFTool import TauIDSFTool\n\nclass lepSFProducer(Module):\n \"\"\" This module is copied from the NanoAOD-tools,\n but developed for lastest SUSY Lepton ID\n \"\"\"\n def __init__(self, era, muonSelectionTag=\"Loose\", electronSelectionTag=\"Veto\", \n photonSelectionTag=\"Loose\", tauSelectionTag=\"Medium\"):\n self.era = era\n self.muonSelectionTag = muonSelectionTag\n self.electronSelectionTag = electronSelectionTag\n self.photonSelectionTag = photonSelectionTag\n self.tauSelectionTag = 
tauSelectionTag\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Muon ~~~~~\n # Only looseID miniIso SF existed for 2016\n mu_f =[]\n mu_h =[]\n if self.era == \"2016\":\n mu_f+= [ \"Muon_IDScaleFactor_wSys_%sGH.root\" % self.era, \n \"Muon_LooseID_MiniIso0p2SF_2016.root\"\n ]\n mu_h += [\"NUM_%sID_DEN_genTracks_eta_pt\" % self.muonSelectionTag, \n \"SF\"\n ]\n if self.era == \"2017\":\n mu_f+= [ \"Muon_IDScaleFactor_wSys_%s.root\" % self.era, \n \"Muon_%sID_MiniIso0p2SF_%s.root\" % (self.muonSelectionTag, self.era)\n ]\n mu_h += [\"NUM_%sID_DEN_genTracks_pt_abseta\" % self.muonSelectionTag, \n \"TnP_MC_NUM_MiniIso02Cut_DEN_%sID_PAR_pt_eta\" % self.muonSelectionTag\n ]\n elif self.era == \"2018\":\n ## SUSY recommend to use the 2017 Data/FullSim SFs for MiniIso also\n ## for 2018, as no changes are expected and these SFs are very close to 1. \n mu_f+= [ \"Muon_IDScaleFactor_wSys_%s.root\" % self.era, \n \"Muon_%sID_MiniIso0p2SF_%s.root\" % (self.muonSelectionTag, \"2017\")\n ]\n mu_h += [\"NUM_%sID_DEN_TrackerMuons_pt_abseta\" % self.muonSelectionTag, \n \"TnP_MC_NUM_MiniIso02Cut_DEN_%sID_PAR_pt_eta\" % self.muonSelectionTag\n ]\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Electron ~~~~~\n if self.era == \"2016\":\n el_f = [ \"Electron_GT20GeV_RecoSF_2017v2ID_Run%s.root\" % self.era,\n \"Electron_LT20GeV_RecoSF_2017v2ID_Run%s.root\" % self.era,\n \"Electron_SUSYScaleFactors_2017v2ID_Run%s.root\" % self.era,\n \"Electron_SUSYScaleFactors_2017v2ID_Run%s.root\" % self.era\n ]\n el_h = [\"EGamma_SF2D\",\n \"EGamma_SF2D\",\n \"Run%s_CutBased%sNoIso94XV2\" % (self.era, self.electronSelectionTag),\n \"Run%s_Mini\" % self.era\n ]\n elif self.era == \"2017\":\n el_f = [ \"Electron_GT20GeV_RecoSF_2017v2ID_Run%s.root\" % self.era,\n \"Electron_LT20GeV_RecoSF_2017v2ID_Run%s.root\" % self.era,\n \"Electron_SUSYScaleFactors_2017v2ID_Run%s.root\" % self.era,\n \"Electron_SUSYScaleFactors_2017v2ID_Run%s.root\" % self.era\n ]\n el_h = [\"EGamma_SF2D\",\n \"EGamma_SF2D\",\n \"Run%s_CutBased%sNoIso94XV2\" % (self.era, self.electronSelectionTag),\n \"Run%s_MVAVLooseTightIP2DMini\" % self.era\n ]\n elif self.era == \"2018\":\n el_f = [ \"Electron_GT10GeV_RecoSF_2017v2ID_Run%s.root\" % self.era,\n \"Electron_SUSYScaleFactors_2017v2ID_Run%s.root\" % self.era,\n \"Electron_SUSYScaleFactors_2017v2ID_Run%s.root\" % self.era\n ]\n el_h = [\"EGamma_SF2D\",\n \"Run%s_CutBased%sNoIso94XV2\" % (self.era, self.electronSelectionTag),\n \"Run%s_Mini\" % self.era\n ]\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Photon ~~~~~\n\n # The production used 2016v2 ID for 2016 photon\n if self.era == \"2016\":\n pho_f =[\"Photon_%s_80XCutbased_%s.root\"% (self.photonSelectionTag, self.era)]\n pho_h = [\"EGamma_SF2D\"]\n else:\n pho_f = [\"Photon_%s_2017v2Cutbased_%s.root\" % (self.photonSelectionTag, self.era)]\n pho_h = [\"EGamma_SF2D\"]\n\n # In addition to ID scale factors, analysis using it should \n # apply the electron veto scale factors\n\n\n if self.era == \"2016\":\n eleveto_f =[ \"ElectronVeto_ScaleFactors_80X_2016.root\" ]\n eleveto_h = [\"Scaling_Factors_HasPix_R9 Inclusive\"]\n else:\n eleveto_f =[ \"ElectronVeto_PixelSeed_ScaleFactors_2017.root\" ]\n eleveto_h = [\"%s_ID\" % self.photonSelectionTag]\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tau ~~~~~\n # Update to new TauPOG era mapping \n self.TauPOGEraMap= {\n \"2016\":\"2016Legacy\",\n \"2017\":\"2017ReReco\",\n \"2018\":\"2018ReReco\",\n }\n self.tauSFTool = 
TauIDSFTool(self.TauPOGEraMap[self.era],'MVAoldDM2017v2',self.tauSelectionTag)\n\n\n        mu_f = [\"%s/src/PhysicsTools/NanoSUSYTools/data/leptonSF/\" % os.environ['CMSSW_BASE'] + f for f in mu_f]\n        el_f = [\"%s/src/PhysicsTools/NanoSUSYTools/data/leptonSF/\" % os.environ['CMSSW_BASE'] + f for f in el_f]\n        pho_f = [\"%s/src/PhysicsTools/NanoSUSYTools/data/leptonSF/\" % os.environ['CMSSW_BASE'] + f for f in pho_f]\n        eleveto_f = [\"%s/src/PhysicsTools/NanoSUSYTools/data/leptonSF/\" % os.environ['CMSSW_BASE'] + f for f in eleveto_f]\n\n        self.mu_f = ROOT.std.vector(str)(len(mu_f))\n        self.mu_h = ROOT.std.vector(str)(len(mu_f))\n        for i in range(len(mu_f)): self.mu_f[i] = mu_f[i]; self.mu_h[i] = mu_h[i];\n        self.el_f = ROOT.std.vector(str)(len(el_f))\n        self.el_h = ROOT.std.vector(str)(len(el_f))\n        for i in range(len(el_f)): self.el_f[i] = el_f[i]; self.el_h[i] = el_h[i];\n        self.pho_f = ROOT.std.vector(str)(len(pho_f))\n        self.pho_h = ROOT.std.vector(str)(len(pho_f))\n        for i in range(len(pho_f)): self.pho_f[i] = pho_f[i]; self.pho_h[i] = pho_h[i];\n        self.eleveto_f = ROOT.std.vector(str)(len(eleveto_f))\n        self.eleveto_h = ROOT.std.vector(str)(len(eleveto_f))\n        for i in range(len(eleveto_f)): self.eleveto_f[i] = eleveto_f[i]; self.eleveto_h[i] = eleveto_h[i];\n\n        for library in [ \"libCondFormatsJetMETObjects\", \"libPhysicsToolsNanoAODTools\" ]:\n            if library not in ROOT.gSystem.GetLibraries():\n                print(\"Load Library '%s'\" % library)\n                ROOT.gSystem.Load(library)\n\n    \n\n    def beginJob(self):\n        self._worker_mu = ROOT.LeptonEfficiencyCorrector(self.mu_f,self.mu_h)\n        self._worker_el = ROOT.LeptonEfficiencyCorrector(self.el_f,self.el_h)\n        self._worker_pho = ROOT.LeptonEfficiencyCorrector(self.pho_f,self.pho_h)\n        self._worker_eleveto = ROOT.LeptonEfficiencyCorrector(self.eleveto_f,self.eleveto_h)\n    def endJob(self):\n        pass\n\n    def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n        self.out = wrappedOutputTree\n        self.out.branch(\"Muon_%sSF\" % self.muonSelectionTag , \"F\" , \\\n            lenVar=\"nMuon\", title=\"Muon scale factor per Muon\")\n        self.out.branch(\"Muon_%sSFErr\" % self.muonSelectionTag , \"F\" , \\\n            lenVar=\"nMuon\", title=\"Muon scale factor error per Muon\")\n        self.out.branch(\"Electron_%sSF\" % self.electronSelectionTag , \"F\" , \\\n            lenVar=\"nElectron\", title=\"Reco+ID scale factor per Electron\")\n        self.out.branch(\"Electron_%sSFErr\" % self.electronSelectionTag , \"F\" , \\\n            lenVar=\"nElectron\", title=\"Reco+ID scale factor error per Electron\")\n        self.out.branch(\"Photon_%sSF\" % self.photonSelectionTag , \"F\" , \\\n            lenVar=\"nPhoton\", title=\"ID scale factor per Photon\")\n        self.out.branch(\"Photon_%sSFErr\" % self.photonSelectionTag , \"F\" , \\\n            lenVar=\"nPhoton\", title=\"ID scale factor error per Photon\")\n        self.out.branch(\"Tau_%sSF\" % self.tauSelectionTag , \"F\" , \\\n            lenVar=\"nTau\", title=\"ID scale factor per Tau\")\n        self.out.branch(\"Tau_%sSF_Up\" % self.tauSelectionTag , \"F\" , \\\n            lenVar=\"nTau\", title=\"ID scale factor up error per Tau\")\n        self.out.branch(\"Tau_%sSF_Down\" % self.tauSelectionTag , \"F\" , \\\n            lenVar=\"nTau\", title=\"ID scale factor down error per Tau\")\n\n    def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n        pass\n\n    def analyze(self, event):\n        \"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n        muons = Collection(event, \"Muon\")\n        electrons = Collection(event, \"Electron\")\n        photons = Collection(event, \"Photon\")\n        taus = Collection(event, \"Tau\")\n        gens = 
Collection(event, \"GenPart\")\n\n sf_el = [ self._worker_el.getSF(el.pdgId,el.pt,el.eta) for el in electrons ]\n if self.era == \"2016\":\n sf_mu = [ self._worker_mu.getSF(12,mu.pt,mu.eta) for mu in muons ]\n else:\n sf_mu = [ self._worker_mu.getSF(mu.pdgId,mu.pt,mu.eta) for mu in muons ]\n sf_pho = [ self._worker_pho.getSF(pho.pdgId,pho.pt,pho.eta) for pho in photons ]\n\n sferr_el = [ self._worker_el.getSFErr(el.pdgId,el.pt,el.eta) for el in electrons ]\n if self.era == \"2016\":\n sferr_mu = [ self._worker_mu.getSFErr(12,mu.pt,mu.eta) for mu in muons ]\n else:\n sferr_mu = [ self._worker_mu.getSFErr(mu.pdgId,mu.pt,mu.eta) for mu in muons ]\n sferr_pho = [ self._worker_pho.getSFErr(pho.pdgId,pho.pt,pho.eta) for pho in photons ]\n\n sferr_el = [a/b for a, b in zip(sferr_el, sf_el)] \n sferr_mu = [a/b for a, b in zip(sferr_mu, sf_mu)] \n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Photon ~~~~~\n sf_eleveto = []\n sferr_eleveto= []\n if self.era == \"2016\":\n sf_eleveto = [ self._worker_eleveto.getSF(pho.pdgId,pho.pt,abs(pho.eta)) for pho in photons ]\n sferr_eleveto = [ self._worker_eleveto.getSFErr(pho.pdgId,pho.pt,abs(pho.eta)) for pho in photons ]\n else:\n for pho in photons:\n binx = 1 if pho.r9 > 0.94 else 2\n binx = binx + 3 if pho.isScEtaEE else binx \n sf_eleveto.append(self._worker_eleveto.getSF(pho.pdgId, 0, binx ))\n sferr_eleveto.append(self._worker_eleveto.getSFErr(pho.pdgId, 0, binx ))\n ssf_pho = [a*b for a, b in zip(sf_pho, sf_eleveto)] \n ssferr_pho = [ math.sqrt(((serrp/sp)** 2 + (serre/se)**2))/ ssf if ssf !=0 else 0 for sp, se, serrp, serre, ssf in zip(sf_pho, sf_eleveto, sferr_pho, sferr_eleveto, ssf_pho) ] \n\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tau ~~~~~\n sf_tau = []\n sf_tau_up = []\n sf_tau_down = []\n for tau in taus:\n genmatch = -1\n ## genPartFlave: 1 = prompt electron, 2 = prompt muon, 3 = tau->e decay, \n ## 4 = tau->mu decay, 5 = hadronic tau decay, 0 = unknown or unmatched\n if tau.genPartFlav >= 3 :\n genmatch = 5\n sf_tau.append(self.tauSFTool.getSFvsPT(tau.pt, genmatch))\n sf_tau_up.append(self.tauSFTool.getSFvsPT(tau.pt, genmatch, unc=\"Up\"))\n sf_tau_down.append(self.tauSFTool.getSFvsPT(tau.pt, genmatch, unc=\"Down\"))\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Photon Electron Veto SF ~~~~~\n\n\n self.out.fillBranch(\"Muon_%sSF\" % self.muonSelectionTag , sf_mu)\n self.out.fillBranch(\"Muon_%sSFErr\" % self.muonSelectionTag , sferr_mu)\n self.out.fillBranch(\"Electron_%sSF\" % self.electronSelectionTag , sf_el)\n self.out.fillBranch(\"Electron_%sSFErr\" % self.electronSelectionTag , sferr_el)\n self.out.fillBranch(\"Photon_%sSF\" % self.photonSelectionTag , ssf_pho)\n self.out.fillBranch(\"Photon_%sSFErr\" % self.photonSelectionTag , ssferr_pho)\n self.out.fillBranch(\"Tau_%sSF\" % self.tauSelectionTag , sf_tau)\n self.out.fillBranch(\"Tau_%sSF_Up\" % self.tauSelectionTag , sf_tau_up)\n self.out.fillBranch(\"Tau_%sSF_Down\" % self.tauSelectionTag , sf_tau_down)\n return True\n\n# define modules using the syntax 'name = lambda : constructor' to avoid having them loaded when not needed\n# lepSF = lambda : lepSFProducer( \"LooseWP_2016\", \"GPMVA90_2016\")\n\n","sub_path":"python/modules/lepSFProducer.py","file_name":"lepSFProducer.py","file_ext":"py","file_size_in_byte":12844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"214746713","text":"#run_model.py\n#Runs the OCR model.\n#Portions of this code modified and adapted for 
use in this context from:\n#https://nicholastsmith.wordpress.com/2017/10/14/deep-learning-ocr-using-tensorflow-and-python/\n\nimport numpy as np\nimport os\nimport string\nimport sys\nfrom skimage.io import imread\nfrom sklearn.model_selection import ShuffleSplit\nfrom conv_net import ConvNet\nfrom rnn_text import RNNNet\nimport copy\n\nTT_SPLIT = 900\nIMAGE_SIZE = [15, 640, 3] #H, W, C\nchar_to_ind = {}\nind_to_char = {}\n \ndef loadImages(path = '.'):\n label_file = os.path.join(path, 'dataset/Train.csv')\n image_matrix, label_matrix = [], []\n with open(label_file) as file:\n for line in file:\n image_path, label = line.strip().split(',')\n image_matrix.append(imread(path + \"/dataset/\" + image_path))\n label_matrix.append(list(label) + [' '] * (64 - len(label))) #Pad strings with spaces\n return np.stack(image_matrix), np.stack(label_matrix)\n\ndef fixY():\n top_ind = 0\n for char in string.ascii_letters[:26]:\n char_to_ind[char] = ord(char) - 97 + top_ind\n ind_to_char[ord(char) - 97 + top_ind] = char\n top_ind += len(string.ascii_letters[:26])\n for char in string.ascii_letters[26:]:\n char_to_ind[char] = ord(char) - 65 + top_ind\n ind_to_char[ord(char) - 65 + top_ind] = char\n top_ind += len(string.ascii_letters[26:])\n for char in string.digits:\n char_to_ind[char] = ord(char) - 48 + top_ind\n ind_to_char[ord(char) - 48 + top_ind] = char\n top_ind += len(string.digits)\n char_to_ind[' '] = top_ind\n ind_to_char[top_ind] = ' '\n\nif __name__ == \"__main__\":\n #define character to index mapping\n fixY()\n vocab_size = len(char_to_ind)\n\n #define the convolutional model\n model = ConvNet(input_size=IMAGE_SIZE, \n output_size=vocab_size,\n filter_size=[[5, 5, vocab_size // 2], [4, 4, vocab_size], [3, 5, vocab_size]],\n strides=[[1, 2], [3, 1], [5, 5]],\n pooling_schedule=[])\n \n A, Y = loadImages()\n \n #vectorize labels\n fixed_y = []\n for inst in Y:\n entry = []\n for c in inst:\n entry.append(char_to_ind[c])\n fixed_y.append(entry)\n fixed_y = np.stack(fixed_y)\n\n #define train/test split\n X_train = A[:TT_SPLIT]\n y_train = fixed_y[:TT_SPLIT]\n X_val = A[TT_SPLIT:]\n y_val = fixed_y[TT_SPLIT:]\n\n #train convolutional model\n trace = model.train(X_train, y_train, X_val, y_val,\n learning_rate=1e-3, \n reg=np.float32(5e-6), \n num_iters=1000,\n batch_size=5, \n verbose=True)\n \n print(\"TRAIN ACC: \" + str(trace['train_acc_history'][-1]))\n print(\"VAL ACC: \" + str(trace['val_acc_history'][-1]))\n\n scores = model.predict_scores(X_val)\n\n #normalize score logits\n corrected = []\n for image in scores:\n new_image = []\n for character in image:\n total = np.sum(character)\n new_image.append(np.divide(character, total))\n corrected.append(new_image)\n scores = corrected\n\n #define RNN model\n rnn_model = RNNNet()\n\n predicted_labels = model.predict(X_val)\n corrected_predictions = []\n #predict output\n for image_idx in range (0, len(predicted_labels)):\n #predict output of convolutional network\n image = predicted_labels[image_idx]\n final_string = \"\"\n for char_idx in image:\n final_string += ind_to_char[char_idx]\n print(\"ORIGINAL: \" + str(final_string))\n\n #process through RNN\n string_so_far = final_string[0]\n cur_to_process = final_string[0]\n origcnt, newcnt = 0, 0\n for char_idx in range (1, len(final_string)):\n max_confidence = np.max(scores[image_idx][char_idx])\n ocr_predicted_char = ind_to_char[np.argmax(scores[image_idx][char_idx])]\n #split predictions by word\n if (ocr_predicted_char == \" \"):\n cur_to_process = \"\"\n string_so_far += \" \"\n origcnt 
+= 1\n else:\n #only redefine a character if confidence is low\n if (max_confidence < 0.25) and (cur_to_process != \"\"):\n newcnt += 1\n new_char = rnn_model.generate_text(cur_to_process, scores[image_idx][char_idx])\n #don't allow a space to be predicted\n if (new_char == \" \"):\n new_char = ocr_predicted_char\n #otherwise, just use the prediction from the convolutional network\n else:\n origcnt += 1\n new_char = ocr_predicted_char\n cur_to_process += new_char\n string_so_far += new_char\n\n #convert final string to array of indices\n fixed_y = []\n for c in string_so_far:\n fixed_y.append(char_to_ind[c])\n \n corrected_predictions.append(fixed_y)\n\n #print corrected string from RNN output\n print(\"CORRECTED(\" + str(origcnt) + \",\" + str(newcnt) + \"): \" + str(string_so_far))\n \n new_val_acc = np.float32(corrected_predictions == y_val).mean()\n print(\"NEW VALIDATION ACCURACY: \" + str(new_val_acc))\n","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424226027","text":"import sys\nimport day00_6_functions \n\n\ndef main():\n print(\"Please select an option by typing the corresponding number:\" ,\"\\n\", \\\n \"1: Add a recipe (name, ingredients, meal, prep_time)\" ,\"\\n\",\\\n \"2: Delete a recipe\" ,\"\\n\", \\\n \"3: Print a recipe\" ,\"\\n\", \\\n \"4: Print the cookbook\" ,\"\\n\", \\\n \"5: Quit\")\n \n while True:\n\n x = input()\n #need to be a string\n if x == \"1\":\n print(\"Please enter the recipe's name, ingredients, meal, prep_time (with a space between each element):\")\n a, b, c, d = input().split()\n day00_6_functions.add_recipe(a,b,c,d)\n \n elif x == \"2\":\n \n z = input(\"Please enter the recipe's name you want to remove from the cookbook: \")\n day00_6_functions.delete_recipe(z)\n elif x == \"3\":\n y = input(\"Please enter the recipe's name to get its details: \")\n day00_6_functions.print_recipe(y)\n elif x == \"4\":\n day00_6_functions.print_cookbook()\n elif x == \"5\":\n print(\"Cookbook closed.\")\n break\n else:\n print(\"This option does not exist, please type the corresponding number.\")\n print(\"To exit, enter 5.\")\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"day00/ex06/day00_6.py","file_name":"day00_6.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"304083151","text":"import sys\r\nimport time\r\nimport nltk\r\nfrom nltk import word_tokenize\r\nfrom urllib import request\r\nimport re\r\nfrom os import path\r\nfrom urllib import parse\r\nfrom _custom import *\r\n\r\n\ndef search_wiki(title):\r\n\ttitle = re.sub(\" \", \"+\", title)\r\n\turl = \"https://en.wikipedia.org/w/index.php?sort=relevance&search=song+\"+title\r\n\t\r\n\ttry:\r\n\t\tresponse = request.urlopen(url)\r\n\texcept:\r\n\t\tprint(f\"Failed. Trying again in 5 seconds ({url})\")\r\n\t\ttime.sleep(5)\r\n\t\ttry:\r\n\t\t\tresponse = request.urlopen(url)\r\n\t\texcept:\r\n\t\t\tprint(\"Failed again. Skipping.\")\r\n\t\t\treturn False\r\n\r\n\traw = response.read().decode('utf8')\r\n\traw = raw[raw.index(\"
\")+38:]\r\n\traw = raw[raw.index('Released')+14:]\r\n\t\tyear = year[:year.index('')]\r\n\t\tyear = int(year[-4:])\r\n\t\traw = raw[raw.index('>Genre')+10:]\r\n\t\tgenre = raw[:raw.index('')]\r\n\t\tgenre = re.sub(\"[\\[\\<].*?[\\>\\]]\", \"\", genre).strip()\r\n\t\tgenre = re.sub(\"\\n\", \", \", genre)\r\n\t\treturn year, genre\r\n\texcept:\r\n\t\tprint(\"Could not find expected infor on url\", url)\r\n\t\treturn False","sub_path":"Unused/wiki_scaper.py","file_name":"wiki_scaper.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"557506421","text":"#!/bin/env python3.5\nimport psycopg2\n\nDBNAME = 'news'\n\nq1 = ('select articles.title, count(log.path) as requestcount '\n 'from articles left join log '\n 'on (\\'/article/\\' || articles.slug) = log.path '\n 'group by articles.title '\n 'order by requestcount desc limit 3;') \nq2 = ('select authors.name, count(log.path) as viewcount '\n 'from authors left join articles '\n 'on authors.id = articles.author '\n 'left join log '\n 'on (\\'/article/\\' || articles.slug) = log.path '\n 'group by authors.name '\n 'order by viewcount desc limit 3;')\nq3 = ('select authors.name, count(log.path) as viewcount '\n 'from authors left join articles '\n 'on authors.id = articles.author '\n 'left join log '\n 'on (\\'/article/\\' || articles.slug) = log.path '\n 'group by authors.name '\n 'order by viewcount desc limit 3;')\n\ns1 = \"Top 3 most popular articles:\"\ns2 = \"Top 3 most popular articles:\"\ns3 = \"Days with error rate greater than 1%:\"\n\na1and2 = ' views'\na3 = '%% errors'\n\ndef get_data(q, title, action):\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(q)\n results = c.fetchall()\n db.close()\n print(title)\n for result in results:\n print('\"{}\" - {}{}'.format(result[0], result[1], action))\n\n# Run analysis\nget_data(q1, s1, a1and2)\nprint('\\n')\nget_data(q2, s2, a1and2)\nprint('\\n')\nget_data(q3, s3, a3)\n","sub_path":"logsanalysis2.py","file_name":"logsanalysis2.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"289342083","text":"import scipy.io as scio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport cv2 as cv\n\n\ndef feedforward(w,a,x):\n # sigmoid 激活函数\n f = lambda s: 1 / (1 + np.exp(-s))\n\n w = np.array(w)\n temp = np.array(np.concatenate((a,x),axis=0))\n z_next = np.dot(w , temp)\n\n return f(z_next), z_next\n\ndef backprop(w,z,delta_next):\n\n # sigmoid 激活函数\n f = lambda s: np.array(1 / (1 + np.exp(-s)))\n\n # sigmoid 激活函数的导数\n df = lambda s: f(s) * (1 - f(s))\n\n delta = df(z) * np.dot(w.T,delta_next)\n\n return delta\n\nDataSet = scio.loadmat('yaleB_face_dataset.mat')\nunlabeledData = DataSet['unlabeled_data']\n\ndataset_size = 80 # 我们所准备无标签的人脸图片数据数量\nunlabeled_data = np.zeros(unlabeledData.shape)\n\n# 利用 z-score 归一化方法归一数据\nfor i in range(dataset_size):\n tmp = unlabeledData[:,i] / 255.\n unlabeled_data[:,i] = (tmp - np.mean(tmp)) / np.std(tmp)\n\nalpha = 0.5 # 学习步长\nmax_epoch = 300 # 自编码器训练总次数\nmini_batch = 10 # 最小批训练时,每次使用 10 个样本同时进行训练\nheight = 48 # 人脸数据图片的高度\nwidth = 42 # 人脸数据图片的宽度\nimgSize = height * width\n\n# 神经网络结构\nhidden_node = 60 # 网络隐藏层节点数目\nhidden_layer = 2\nlayer_struc = [[imgSize, 1],\n [0, hidden_node],\n [0, imgSize]]\nlayer_num = 3 # 网络层次数目\n\n# 初始化无监督网络的权值\nw = []\nfor l in range(layer_num-1):\n 
    w.append(np.random.randn(layer_struc[l+1][1],sum(layer_struc[l])))\n\n# define the number of external (input) nodes of each layer\nX = []\nX.append(np.array(unlabeled_data[:,:]))\nX.append(np.zeros((0,dataset_size)))\nX.append(np.zeros((0,dataset_size)))\n\n# initialize the deltas needed for error backpropagation during training\ndelta = []\nfor l in range(layer_num):\n    delta.append([])\n\n# parameters for displaying the results\nnRow = max_epoch / 100 + 1\nnColumn = 4\neachFaceNum = 20 # there are 20 unlabeled images for each person\n\n# show the original images in the first row\nfor iImg in range(nColumn):\n    ax = plt.subplot(nRow, nColumn, iImg+1)\n    plt.imshow(unlabeledData[:,eachFaceNum * iImg + 1].reshape((width,height)).T, cmap= plt.cm.gray)\n    ax.get_xaxis().set_visible(False)\n    ax.get_yaxis().set_visible(False)\n\n# unsupervised training\ncount = 0 # training-epoch counter\nprint('Autoencoder training start..')\nfor iter in range(max_epoch):\n\n    # shuffle the sample indices\n    ind = list(range(dataset_size))\n    random.shuffle(ind)\n\n    a = []\n    z = []\n    z.append([])\n    for i in range(int(np.ceil(dataset_size / mini_batch))):\n        a.append(np.zeros((layer_struc[0][1], mini_batch)))\n        x = []\n        for l in range(layer_num):\n            x.append( X[l][:,ind[i*mini_batch : min((i+1)*mini_batch, dataset_size)]])\n\n        y = unlabeled_data[:,ind[i*mini_batch:min((i+1)*mini_batch,dataset_size)]]\n        for l in range(layer_num-1):\n            a.append([])\n            z.append([])\n            a[l+1],z[l+1] = feedforward(w[l],a[l],x[l])\n\n        delta[layer_num-1] = np.array(a[layer_num-1] - y) * np.array(a[layer_num-1])\n        delta[layer_num-1] = delta[layer_num-1] * np.array(1-a[layer_num-1])\n\n        for l in range(layer_num-2, 0, -1):\n            delta[l] = backprop(w[l],z[l],delta[l+1])\n\n        for l in range(layer_num-1):\n            dw = np.dot(delta[l+1], np.concatenate((a[l],x[l]),axis=0).T) / mini_batch\n            w[l] = w[l] - alpha * dw\n\n    count = count + 1\n\n    # every 100 epochs, show the autoencoder's current reconstruction of the original images\n    if np.mod(iter+1,100) == 0 :\n        b = []\n        b.append(np.zeros((layer_struc[0][1],dataset_size)))\n\n        for l in range(layer_num-1):\n            tempA, tempZ = feedforward(w[l], b[l], X[l])\n            b.append(tempA)\n\n        for iImg in range(nColumn):\n            ax = plt.subplot(nRow,nColumn, iImg + nColumn * (iter+1)/100 + 1)\n            tmp = b[layer_num-1][:,eachFaceNum * iImg + 1]\n            dis_result = ((tmp * np.std(tmp)) + np.mean(tmp)).reshape(width,height).T\n            plt.imshow(dis_result,cmap= plt.cm.gray)\n            ax.get_xaxis().set_visible(False)\n            ax.get_yaxis().set_visible(False)\n\n        print('Learning epoch:', count, '/', max_epoch)\n    fig2 = plt.figure(2)\n\n    # get the encoded representation\n    code_result, tempZ = feedforward(w[0], b[0], X[0])\n\n    # show the original images\n    for iImg in range(nColumn):\n        ax = plt.subplot(2, nColumn, iImg + 1)\n        plt.imshow(unlabeled_data[:, eachFaceNum * iImg + 1].reshape((width, height)).T, cmap=plt.cm.gray)\n        ax.get_xaxis().set_visible(False)\n        ax.get_yaxis().set_visible(False)\n\n    # show the corresponding encodings\n    for iImg in range(nColumn):\n        ax = plt.subplot(2, nColumn, iImg + nColumn + 1)\n        plt.imshow(code_result[:, eachFaceNum * iImg + 1].reshape((hidden_node, 1)), cmap=plt.cm.gray)\n        ax.get_xaxis().set_visible(False)\n        ax.get_yaxis().set_visible(False)","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"3514563","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy import Item, Field\n\n\nclass ImdbItem(Item):\n    \"\"\"\n    This class keeps all the information we need to capture for a\n    movie.\n    \"\"\"\n    # unique identifier assigned by imdb\n    link = Field()\n    text = 
Field()\n","sub_path":"scrapy_pedia/scrapy_pedia/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"481701550","text":"from django.contrib.auth import authenticate, login\nfrom django.shortcuts import render, redirect, get_object_or_404\n\nfrom .models import Product, Rating\nfrom .forms import ProductForm, RatingForm\nfrom django.forms.formsets import formset_factory\n\n# Create your views here.\ndef product_list(request):\n # construct a queryset\n qs = Product.objects.products_with_score().select_related('submitter')\n return render(request, \"rating/product_list.html\", {\"products\":qs})\n\ndef product_details(request, pid):\n obj = get_object_or_404(Product, pk=pid)\n return render(\n request,\n \"rating/product_detail.html\",\n {\"product\": obj, },\n )\n\ndef product_list_entry(request):\n if not request.user.is_authenticated():\n # fetch an authenticated user (demo only)\n u = authenticate(username=\"foo\", password=\"123\")\n login(request, u)\n\n ProductFormSet = formset_factory(ProductForm, extra=0)\n \n if request.method == \"POST\":\n formset = ProductFormSet(request.POST)\n if formset.is_valid():\n [Product.objects.create(**x.cleaned_data) for x in formset]\n return redirect('product_list')\n else:\n # set submitter hidden field to logged in user\n # need just 2 forms\n initial = [{\"submitter\":request.user} for x in xrange(2)]\n formset = ProductFormSet(initial=initial)\n \n return render(\n request,\n \"rating/product_list_entry.html\",\n {\n \"formset\":formset,\n },\n )\n\ndef product_rate(request, pid):\n # login the user if he/she is not\n if not request.user.is_authenticated():\n # fetch an authenticated user\n u = authenticate(username=\"foo\", password=\"123\")\n login(request, u)\n\n product = get_object_or_404(Product, pk=pid)\n if request.method == \"POST\":\n form = RatingForm(request.POST)\n if form.is_valid():\n Rating.objects.create(**form.cleaned_data)\n return redirect(form.cleaned_data[\"product\"])\n else:\n form = RatingForm(initial={\"reviewer\":request.user, \"product\":product})\n return render(\n request,\n \"rating/product_rate.html\",\n {\"form\": form, \"product\": product}\n )\n\ndef create_product(request):\n\n # login the user if he/she is not\n if not request.user.is_authenticated():\n # fetch an authenticated user\n u = authenticate(username=\"foo\", password=\"123\")\n login(request, u)\n\n # check for submission\n if request.method == \"POST\":\n # creating bounded form\n # request.POST is just a dict\n form = ProductForm(request.POST) \n # validating\n if form.is_valid():\n # processing on success\n # p = Product.objects.create(\n # name=form.cleaned_data[\"name\"],\n # url=form.cleaned_data.get(\"url\"),\n # submitter=form.cleaned_data.get(\"submitter\"),\n # )\n # return redirect('product_list')\n\n # # or\n\n p = Product.objects.create(**form.cleaned_data)\n return redirect(p)\n else:\n form = ProductForm(\n initial={\"submitter\":request.user}\n )\n\n return render(\n request,\n \"rating/product_create.html\",\n {\"form\": form, }\n )\n\n","sub_path":"rating/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"401088249","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 26 23:57:40 2020\n\n@author: farhad\n\"\"\"\nimport sys\nfrom SignalDto import SignalDto\nfrom Utils import 
Utils\nfrom time import sleep\nfrom selenium import webdriver\n\n\nclass WSSignals:\n \n \n def __init__(self,driver):\n self.driver= driver\n\n utils= Utils()\n \n\n def createSignalDto(self,msg,chName):\n print('creating signalDto for '+chName+ ' started')\n \n signalDto= SignalDto()\n signalDto.provider = chName\n \n lines=str.splitlines(msg)\n for line in lines :\n \n item=line.lower() \n posEnter=str.find(item,'buy')+str.find(item,'sell')\n posTP=str.find(item,'tp')\n posSL=str.find(item,'sl')\n item= item.split(' ')\n \n if posEnter !=-2 : \n signalDto.symbol=item[0]\n signalDto.enter_type = 1 if item[2] == \"buy\" else 2\n signalDto.enterPrice = item[-1]\n\n elif posSL != -1 :\n signalDto.sl =item[1]\n \n elif posTP != -1 :\n if(signalDto.tp == 0):\n signalDto.tp = item[1]\n elif(signalDto.tp2 == 0):\n signalDto.tp2 = item[1]\n elif(signalDto.tp3 == 0):\n signalDto.tp3 = item[1]\n \n print('creating signalDto for '+chName+ ' finished') \n return {0:signalDto}","sub_path":"backup/13990704/forex-py/forex-py/providers/WSSignals.py","file_name":"WSSignals.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"448064210","text":"#auto-update\nfrom Cat_session import *\nfrom Cat_update import *\nfrom amazon_new import *\nfrom Amazon_list_format import *\nfrom Cat_dbase import *\nfrom text_l import *\nimport time\nfrom loadJson import *\nimport random\n\nclass Asin_update:\n\tdef __init__(self, host ='192.168.5.90', credFile = 'C:\\\\Users\\\\Owner\\\\Documents\\\\Important\\\\catcred.txt',\n\t\tcredFile2 = 'C:\\\\Users\\\\Owner\\\\Documents\\\\Important\\\\cat_cred2.txt',*args):\n\t\tself.jData = loadJson('credFile.json')\n\t\t#database connection\n\t\tself.text_cred = text_l(credFile)\n\t\t#self.dbObject = Db_mngmnt(self.text_cred[2], self.text_cred[3],'asins', host)\n\t\tself.dbObject = Db_mngmnt(self.jData['DB_Username'], self.jData['DB_Password'], 'asins', self.jData['DB_IP'])\n\t\t#catalog database connection\n\t\t#TODO add way to use different credentials using arguments in Cat_dbase and Asin_update\n\t\tself.cat_obj = Cat_dbase(credFile2)\n\t\t#makes object return the product information dicts with the proper key names\n\t\tself.cat_obj.set_proper_desc(True)\n\t\t#catalog update instance\n\t\tself.cat_update_inst = Cat_update(credFile, credFile2)\n\t\t#amazon connection\n\t\tself.amazon_inst = Asin_create()\n\t\t#product information\n\t\tself.__prod_info = []\n\t\tself.__id_check_queue = []\n\t\tself.__barcode_queue = []\n\t\t#product ids that need to get product information\n\t\tself.__id_create_queue = []\n\t\t#for product Ids coupled with asins\n\t\tself.__asin_id_lst = []\n\t\t#used for start_up_all method\n\t\tself.__amazon_online = False\n\t\t#for successful ASIN creates\n\t\tself.__fail_lst = []\n\t\t#for items that have ASINs that need to be retrieved during the ASIN creation process\n\t\tself.__retr_lst = []\n\t\t#\n\t\tself.__keep_live_check = time.time()\n\t\tself.asin_create_timer = 0\n\t\tself.retryCreate = True\n\t\tself.attemptDelay = False\n\t\tself.timeEstimate = 48\n\t\tself.t_url = \"https://catalog.amazon.com/abis/Classify/SelectCategory?itemType=collectible-single-trading-cards&productType=TOYS_AND_GAMES\"\n\tdef start_up_all(self):\n\t\t#self.cat_update_inst.start()\n\t\tself.amazon_inst.start()\n\t\t'''amazon_counter = time.time()\n\t\twhile not self.__amazon_online:\n\t\t\tif self.amazon_inst.url() in [\"https://sellercentral.amazon.com/gp/homepage.html?\", 
\"https://sellercentral.amazon.com/gp/homepage.html/ref=ag_home_logo_xx\"]:\n\t\t\t\tself.__amazon_online = True\n\t\t\t\tbreak\n\t\t\telif (time.time() - amazon_counter) >= 30:\n\t\t\t\traise RuntimeError(\"You need to log in to the seller central account in the Amazon instance.\")'''\n\tdef standBy(self, interval= 30):\n\t\tprint(\"On standby\")\n\t\twhile(True):\n\t\t\ttime.sleep(interval)\n\t\t\tself.amazon_inst.browser.refresh()\n\tdef get_prod_info(self):\n\t\treturn self.__prod_info\n\tdef set_prod_info(self, x):\n\t\tself.__prod_info = x\n\tdef get_asin_id_lst(self):\n\t\treturn self.__asin_id_lst\n\tdef set_asin_id_lst(self, x):\n\t\tself.__asin_id_lst = x\n\n\n\tdef get_id_queue(self):\n\t\treturn self.__id_check_queue\n\tdef set_id_queue(self,x):\n\t\tself.__id_check_queue = x\n\tdef get_id_create_queue(self):\n\t\treturn self.__id_create_queue\n\tdef set_id_create_queue(self, x):\n\t\tself.__id_create_queue = x\n\n\tdef get_barcode_queue(self):\n\t\treturn self.__barcode_queue\n\tdef set_barcode_queue(self, x):\n\t\tself.__barcode_queue = x\n\tdef get_ids(self, table_name):\n\t\tp_ids = self.dbObject.query(\"SELECT * FROM {0};\".format(str(table_name)))\n\t\tp_ids = [i[0] for i in p_idges]\n\t\tself.set_id_queue(p_ids)\n\tdef get_ids_cat(self, cat_id, asin_filter = True):\n\t\t#takes ids from catalog\n\t\t#used for updating specific categories\n\t\t#by default it only returns those products that don't have ASINs\n\t\tif asin_filter:\n\t\t\tp_ids = self.cat_obj.cat_need_asin(cat_id)\n\t\t\tself.set_id_queue(p_ids)\n\t\telse:\n\t\t\tp_ids = self.cat_obj.get_category_contents(cat_id, True)\n\t\t\tself.set_id_queue(p_ids)\n\n\n\tdef move_ids(self):\n\t\t#assings value of id_queue to id_create_queue, then removes all of the product ids from id_to_check table on database\n\t\t#deletes ids from database\n\t\tp_ids = self.get_id_queue()\n\t\tself.set_id_create_queue(p_ids)\n\n\t\tfor i in p_ids:\n\t\t\tself.dbObject.cust_com(\"DELETE FROM id_to_check WHERE product_id = \\\"{0}\\\";\".format(str(i)))\n\tdef remove_ids(self):\n\t\tfor i in __id_check_queue:\n\t\t\tpass\n\tdef get_fail_lst(self):\n\t\treturn self.__fail_lst\n\tdef set_fail_lst(self, x):\n\t\tself.__fail_lst = x\n\n\tdef get_retr_lst(self):\n\t\treturn self.__retr_lst\n\tdef set_retr_lst(self, x):\n\t\tself.__retr_lst = x\n\tdef db_query(self, x):\n\t\tres = ''\n\t\tresp = self.dbObject.query(x)\n\t\treturn resp\n\tdef __get_id_asin(self, p_id):\n\t\t#returns the contents of the ASIN field for a given product\n\t\tif fn_grab(cat_update_inst.url()) == str(p_id):\n\t\t\twhile cat_update_inst.load_check():\n\t\t\t\ttime.sleep(.5)\n\t\t\td = cat_update_inst.descriptor_get()\n\t\t\treturn d[\"ASIN\"]\n\t\telif fn_grab(cat_update_inst.url()) != str(p_id):\n\t\t\tself.prod_go_to(p_id)\n\t\t\twhile cat_update_inst.load_check():\n\t\t\t\ttime.sleep(.5)\n\t\t\td = cat_update_inst.descriptor_get()\n\t\t\treturn d[\"ASIN\"]\n\n\n\tdef __check_id_asin(self, p_id=''):\n\t\t#returns boolean\n\t\t#checks single p_id to see if it has an ASIN\n\t\t#returns False if it has no ASIN\n\t\t#returns True if it has an ASIN\n\t\td_asin = self.__get_id_asin(p_id)\n\t\tif d_asin == 'Lookup':\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\tdef cat_get_asins(self, x):\n\t\t#retrieves individual asins from catalog, returns a list\n\t\tresults = []\n\t\tfor i in x:\n\t\t\tresults.append(self.__get_id_asin(i))\n\t\treturn results\n\n\tdef __grab_barcodes(self, num):\n\t\tbarcodes = self.dbObject.query(\"SELECT * from barcodes LIMIT 
{0};\".format(num))\n\t\tif len(barcodes) < num:\n\t\t\traise RuntimeError(\"Only found {0} barcodes.\".format(len(barcodes)))\n\t\treturn barcodes\n\tdef bcode_test(self, num):\n\t\t#remove after testing\n\t\treturn self.__grab_barcodes(num)\n\tdef barcode_dump(self, num):\n\t\tbcodes = self.__grab_barcodes(num)\n\n\tdef import_csv(self, x):\n\t\tprod_ids = dictionarify(x)\n\t\treturn [i[\"Product Id\"] for i in prod_ids]\n\n\tdef get_descriptions(self, get_images = True):\n\t\tself.set_prod_info([])\n\t\tbarcodes_lst = self.__grab_barcodes(len(self.__id_create_queue))\n\t\tself.set_barcode_queue([i[0] for i in barcodes_lst])\n\t\tbcodes = self.get_barcode_queue()\n\t\tp_ids = self.get_id_create_queue()\n\t\tfor i in range(0, len(p_ids)):\n\t\t\t#self.cat_update_inst.prod_go_to(p_ids[i])\n\t\t\tprod_info = self.cat_obj.get_product(p_ids[i])\n\t\t\tdesc = Amzn_lst_single(prod_info)\n\t\t\tdesc.set_dir(self.jData['Photo_Dir'])\n\t\t\tdesc.set_d_opt(get_images)\n\t\t\tdesc = desc.form()\n\t\t\t#desc = Amzn_lst_single(self.cat_update_inst.descriptor_get()).form()\n\t\t\tdesc[\"Barcode\"] = bcodes[i]\n\t\t\tself.__prod_info.append(desc)\n\t\treturn self.get_prod_info()\n\n\tdef create_asins_v2(self, limit = 1):\n\t\t#should keep on trying to add the single item\n\t\tself.__retr_lst = []\n\t\tself.__fail_lst = []\n\t\tstart = time.time()\n\t\tcount = 1\n\t\tn_limit = 0\n\t\tkI = False\n\n\t\tfor i in self.__prod_info:\n\n\t\t\twhile True and not kI:\n\t\t\t\tif self.attemptDelay: time.sleep(30)\n\t\t\t\tif n_limit > limit:\n\t\t\t\t\tprint(\"Retried {0} times with no success\".format(limit))\n\t\t\t\t\tn_limit = 0\n\t\t\t\t\tcount += 1\n\t\t\t\t\tbreak\n\t\t\t\ttry:\n\t\t\t\t\tprint(\"Attempting {0}. #{1} of {2}\".format(i[\"Product Name\"],count, len(self.__prod_info)))\n\t\t\t\texcept UnicodeEncodeError as UE:\n\t\t\t\t\tprint(\"UnicodeEncodeError on #{0} of {1}. 
Proceeding with ASIN creation\".format(count, len(self.__prod_info)))\n\n\t\t\t\ttry:\n\t\t\t\t\tres = self.amazon_inst.add_single(i, self.t_url)\n\t\t\t\texcept Amazon_Validation_Error as AVE: #untested\n\t\t\t\t\tprint(\"Error occurred:\")\n\t\t\t\t\tprint(AVE)\n\t\t\t\t\tprint(\"Trying again but with EAN\")\n\t\t\t\t\ttry:\n\t\t\t\t\t\ti[\"Barcode Type\"] = self.switch_bcode_type(i[\"Barcode Type\"])\n\t\t\t\t\t\tres = self.amazon_inst.add_single(i,self.t_url)\n\t\t\t\t\texcept Amazon_Validation_Error as AVE:\n\t\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\t\t#count += 1\n\t\t\t\t\t\tn_limit += 1\n\n\n\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"General Error Occurred with {0}\".format(i[\"Product Name\"]))\n\t\t\t\t\t\tprint(sys.exc_info()[:])\n\t\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\t\t#count += 1\n\t\t\t\t\t\tn_limit += 1\n\t\t\t\t\t\t#break\n\n\t\t\t\texcept RuntimeError as RE:\n\t\t\t\t\t#while not res and n_limit < limit:\n\t\t\t\t\tif self.AmazonUnavailable():\n\t\t\t\t\t\t#res = self.amazon_inst.add_single(i)\n\t\t\t\t\t\tn_limit += 1\n\n\n\t\t\t\texcept KeyboardInterrupt as KI:\n\t\t\t\t\tkI = True\n\t\t\t\t\tbreak\n\n\t\t\t\texcept:\n\t\t\t\t\t#general\n\n\t\t\t\t\tprint(\"General Error Occurred with {0}\".format(i[\"Product Name\"]))\n\t\t\t\t\tprint(sys.exc_info()[:])\n\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\tcount += 1\n\t\t\t\t\tbreak\n\n\t\t\t\telse:\n\t\t\t\t\tif res:\n\t\t\t\t\t\tself.__retr_lst.append(i)\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\t\tcount += 1\n\n\t\t#duration = time.time() - self.asin_create_timer()\n\t\t#print(\"Process took {0} seconds to complete.\".format(duration))\n\t\tend = time.time()\n\t\tduration = end - start\n\t\tprint(\"{0} ASINs were created. Failed to create: {1}\".format(len(self.__retr_lst), len(self.__fail_lst)))\n\t\tprint(\"ASIN creation process took {0} seconds\".format(duration))\n\n\tdef create_asins(self):\n\t\tself.__retr_lst = []\n\t\tself.__fail_lst = []\n\t\tstart = time.time()\n\t\tcount = 1\n\t\tfor i in self.__prod_info:\n\t\t\ttry:\n\t\t\t\tprint(\"Attempting {0}. #{1} of {2}\".format(i[\"Product Name\"],count, len(self.__prod_info)))\n\t\t\texcept UnicodeEncodeError as UE:\n\t\t\t\tprint(\"UnicodeEncodeError on #{0} of {1}. 
Proceeding with ASIN creation\".format(count, len(self.__prod_info)))\n\n\t\t\ttry:\n\t\t\t\tres = self.amazon_inst.add_single(i, self.t_url)\n\t\t\texcept Amazon_Validation_Error as AVE: #untested\n\t\t\t\tprint(\"Error occurred:\")\n\t\t\t\tprint(AVE)\n\t\t\t\tprint(\"Trying again but with EAN\")\n\t\t\t\ttry:\n\t\t\t\t\ti[\"Barcode Type\"] = self.switch_bcode_type(i[\"Barcode Type\"])\n\t\t\t\t\tres = self.amazon_inst.add_single(i,self.t_url)\n\t\t\t\texcept Amazon_Validation_Error as AVE:\n\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\tcount += 1\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"General Error Occurred with {0}\".format(i[\"Product Name\"]))\n\t\t\t\t\tprint(sys.exc_info()[:])\n\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\tcount += 1\n\t\t\texcept KeyboardInterrupt as KI:\n\t\t\t\tbreak\n\n\t\t\texcept:\n\t\t\t\t#general\n\n\t\t\t\tprint(\"General Error Occurred with {0}\".format(i[\"Product Name\"]))\n\t\t\t\tprint(sys.exc_info()[:])\n\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\tcount += 1\n\n\t\t\telse:\n\t\t\t\tif res:\n\t\t\t\t\tself.__retr_lst.append(i)\n\t\t\t\t\tcount += 1\n\t\t\t\telse:\n\t\t\t\t\tself.__fail_lst.append(i)\n\t\t\t\t\tcount += 1\n\t\t#duration = time.time() - self.asin_create_timer()\n\t\t#print(\"Process took {0} seconds to complete.\".format(duration))\n\t\tend = time.time()\n\t\tduration = end - start\n\t\tprint(\"{0} ASINs were created. Failed to create: {1}\".format(len(self.__retr_lst), len(self.__fail_lst)))\n\t\tprint(\"ASIN creation process took {0} seconds\".format(duration))\n\tdef asin_create_m_cats(self, cats):\n\t\tself.t_url = \"https://catalog.amazon.com/abis/Classify/SelectCategory?itemType=collectible-single-trading-cards&productType=TOYS_AND_GAMES\"\n\t\tp_ids = []\n\t\tself.cat_obj.reconnect()\n\t\tself.dbObject.reconnect()\n\t\tif not isinstance(cats, list):\n\t\t\traise TypeError(\"Param must be list\")\n\t\tfor i in cats:\n\t\t\t#gets every product id in the category that does not have an ASIN\n\t\t\tself.get_ids_cat(str(i))\n\t\t\tp_ids += self.get_id_queue()\n\t\tself.set_id_create_queue(p_ids)\n\t\t#creates the proper dicts for amazon submission\n\t\tself.get_descriptions()\n\t\t#creates the asins\n\t\tamountOfItems = len(self.get_prod_info())\n\t\tprint(\"{0} items retrieved. 
Process will take around {1} minute(s)\".format(amountOfItems, (amountOfItems * self.timeEstimate) / 60))\n\t\tif self.retryCreate:\n\t\t\t#calls method that tries to create amazon listing several more times if it errors out the first time\n\t\t\tself.create_asins_v2()\n\t\telse:\n\t\t\tself.create_asins()\n\t\tself.amazon_inst.go_to_search_page()\n\n\t\t#retrieves and then updates the ASIN descriptors in the catalog\n\t\t#also deletes the barcodes that were used from the barcode table in the database\n\t\tself.get_asins()\n\tdef asin_create_s_sp(self, p_ids):\n\t\t#for individual sealed product\n\t\tif not isinstance(p_ids, list): raise TypeError(\"p_ids must be list\")\n\t\tself.cat_obj.reconnect()\n\t\tself.dbObject.reconnect()\n\t\tself.t_url = \"https://catalog.amazon.com/abis/Classify/SelectCategory?itemType=trading-card-games&productType=COLLECTIBLE_CARD&newCategory=622892011/623195011/623204011/485125011&displayPath=All%20Product%20Categories%2FToys%20%26%20Games%2FGames%2FTrading%20Card%20Games%2FBooster%20Packs&recommendedBrowseNodeId=485125011&itemName=\"\n\t\tself.set_id_create_queue(p_ids)\n\t\t#creates the proper dicts for amazon submission\n\t\tself.get_descriptions()\n\t\t#creates the asins\n\t\tamountOfItems = len(self.get_prod_info())\n\t\tprint(\"{0} items retrieved. Process will take around {1} minute(s)\".format(amountOfItems, (amountOfItems * self.timeEstimate) / 60))\n\t\tif self.retryCreate:\n\t\t\t#calls method that tries to create amazon listing several more times if it errors out the first time\n\t\t\tself.create_asins_v2()\n\t\telse:\n\t\t\tself.create_asins()\n\t\ttime.sleep(15)\n\t\tself.amazon_inst.go_to_search_page()\n\n\t\t#retrieves and then updates the ASIN descriptors in the catalog\n\t\t#also deletes the barcodes that were used from the barcode table in the database\n\t\tself.get_asins()\n\t\tself.t_url = \"https://catalog.amazon.com/abis/Classify/SelectCategory?itemType=collectible-single-trading-cards&productType=TOYS_AND_GAMES\"\n\n\n\n\tdef m_process(self):\n\t\t#creates asins then retrieves them\n\t\tprint(\"Retrieving product information from catalog.\")\n\t\tself.get_descriptions()\n\t\tprint(\"Creating ASINs\")\n\t\tself.create_asins()\n\t\tprint(\"Obtaining ASINs from Amazon\")\n\t\tself.amazon_inst.go_to_search_page()\n\t\tself.get_asins()\n\t\tprint(\"Fetched ASINs. Now updating product ASINs in catalog\")\n\t\tself.update_asins()\n\t\tprint(\"Updated product ASINs in catalog. 
Now deleting used barcodes from database.\")\n\t\tself.delete_bcodes()\n\t\tself.set_prod_info([])\n\tdef c_g_asins(self):\n\t\t#creates and then updates ASINs\n\t\tself.create_asins()\n\t\tself.amazon_inst.go_to_search_page()\n\t\tself.get_asins()\n\t\tself.update_asins()\n\n\tdef switch_bcode_type(self, x):\n\t\tif x == 'ean':\n\t\t\treturn 'upc'\n\t\telif x == 'upc':\n\t\t\treturn 'ean'\n\tdef delete_bcodes(self):\n\t\tsucc_lst = self.get_retr_lst()\n\t\tprint(\"Deleting barcodes\")\n\t\tself.dbObject.reconnect()#not the best solution, definitely need to have a try/except block for lost connections instead\n\t\tfor i in succ_lst:\n\t\t\tself.dbObject.cust_com(\"DELETE from barcodes WHERE barcode = \\\"{0}\\\";\".format(i[\"Barcode\"]))\n\t\tself.set_barcode_queue([])\n\tdef delete_bcodes_n(self, n):\n\t\tprint(\"Deleting barcodes\")\n\t\tbarcodes = self.dbObject.query(\"SELECT * from barcodes LIMIT {0};\".format(n))\n\t\tsucc_lst = [i[0] for i in barcodes]\n\t\tself.dbObject.reconnect()#not the best solution, definitely need to have a try/except block for lost connections instead\n\t\tfor i in succ_lst:\n\t\t\tprint(\"DELETING {0}\".format(str(i)))\n\t\t\tself.dbObject.cust_com(\"DELETE from barcodes WHERE barcode = \\\"{0}\\\";\".format(i))\n\n\tdef get_asins(self, update_all = False, update = True):\n\t\t#if update argument is true then the method automatically updates the products in the catalog with their new ASINs\n\t\tself.__asin_id_lst = []\n\t\tself.amazon_inst.go_to_search_page()\n\t\t#if update_all is true then it searches for ASINs using all of the product ids, even if they aren't on the retr_lst\n\t\tif update_all:\n\t\t\tp_ids = self.get_id_create_queue()\n\t\t\tp_ids = [{\"Product Id\": str(i)} for i in p_ids]\n\t\telse:\n\t\t\tp_ids = self.get_retr_lst()\n\t\tfor i in p_ids:\n\t\t\tself.__asin_id_lst.append(self.amazon_inst.grab_asin(i[\"Product Id\"]))\n\t\tif update:\n\t\t\tself.update_asins()\n\t\t\tself.delete_bcodes()\n\tdef update_asins(self, sql = True):\n\t\tissues = []\n\t\tself.cat_obj.reconnect()\n\t\tfor i in range(0, len(self.__asin_id_lst)):\n\t\t\t#not pythonic but the only way to ensure it doesn't add string containing \"None\"\n\t\t\t#also ASINs aren't always alphanumeric and can contain only letters\n\n\t\t\tif self.__asin_id_lst[i][1] != \"None\":\n\t\t\t\tif sql:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.cat_obj.update_product(str(self.__asin_id_lst[i][0]), 'asin', self.__asin_id_lst[i][1])\n\t\t\t\t\texcept KeyboardInterrupt as KE:\n\t\t\t\t\t\tbreak\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"General error occurred\")\n\t\t\t\t\t\tprint(sys.exc_info()[:])\n\t\t\t\t\t\tissues.append(str(self.__asin_id_lst[i][0]))\n\n\n\t\t\t\telse:\n\t\t\t\t\tself.cat_update_inst.go_to(self.__asin_id_lst[i][0])\n\t\t\t\t\tself.cat_update_inst.update_descriptor_all('Asin', self.__asin_id_lst[i][1])\n\t\t\t\t\tself.cat_update_inst.click_update()\n\t\t\t\t\tself.cat_update_inst.load_check()\n\t\tif issues:\n\t\t\tprint(\"Ran into issues updating the following products\")\n\t\t\treturn issues\n\tdef retr_asins_for_cat(self, cat_id, asin_filter = True):\n\t\t#retrieves product ids from catalog, puts them into a list of dictionaries and then assigns that list to retr_lst\n\t\tself.get_ids_cat(cat_id, asin_filter)\n\t\tp_ids = self.get_id_queue()\n\t\tresults = [{\"Product Id\": str(i)} for i in p_ids]\n\n\t\tself.set_retr_lst(results)\n\t\tself.get_asins()\n\n\n\tdef keep_live(self, interval = 30):\n\t\tdiff = time.time() - self.__keep_live_check\n\t\tif diff > 
interval:\n\t\t\tself.amazon_inst.browser.go_to(\"https://sellercentral.amazon.com/gp/homepage.html\")\n\t\t\tself.__keep_live_check = time.time()\n\tdef wait_stay_live(self, interval = 30):\n\t\twhile True:\n\n\t\t\ttry:\n\t\t\t\ttime.sleep(30)\n\n\t\t\texcept KeyboardInterrupt as KE:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself.amazon_inst.browser.go_to(\"https://sellercentral.amazon.com/gp/homepage.html\")\n\tdef AmazonUnavailable(self):\n\t\tsite = self.amazon_inst.source()\n\t\ttitle = site.find(\"head\").find('title')\n\t\tif title is None:\n\t\t\treturn False\n\t\telif title.text == 'Website Temporarily Unavailable':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\tdef updateTest(self, p_id):\n\t\t#static method for testing amazon update function\n\t\tprod_info = self.cat_obj.get_product(p_ids)\n\t\tself.amazon_inst.update_single(prod_info)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef export_csv13(lst):\n\tresults = []\n\theaders = list(lst[0].keys())\n\t#results = results + headers\n\n\tfor i in lst:\n\t\tresults.append(S_format(i).d_sort(headers))\n\tw_csv(results, \"exported_lst.csv\")\n\treturn results\n\n\n\n\n\n\n#need method that collects product information from catalog and makes an ASIN with it\n","sub_path":"Asin_update.py","file_name":"Asin_update.py","file_ext":"py","file_size_in_byte":17564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"147217916","text":"import os\n\nfrom .sa_database import Database\nfrom .sql_utils import run_sql_file\nfrom .utils import Duration\n\n\ndata = {\n 'campaign_info_all': os.getenv('CAMPAIGN_INFO_ALL'),\n 'field_data_field_campaigns': os.getenv('FIELD_DATA_FIELD_CAMPAIGNS'),\n 'node': os.getenv('NODE'),\n 'field_data_field_campaign_type': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'CAMPAIGN_TYPE'))),\n 'field_data_field_run_date': os.getenv('FIELD_DATA_FIELD_RUN_DATE'),\n 'field_data_field_call_to_action': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'CALL_TO_ACTION'))),\n 'field_data_field_reportback_noun': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'REPORTBACK_'\n 'NOUN'))),\n 'field_data_field_reportback_verb': os.getenv(''.join(('FIELD_DATA_FIELD_'\n 'REPORTBACK_'\n 'VERB'))),\n 'field_data_field_action_type': os.getenv('FIELD_DATA_FIELD_ACTION_TYPE'),\n 'taxonomy_term_data': os.getenv('TAXONOMY_TERM_DATA'),\n 'field_data_field_cause': os.getenv('FIELD_DATA_FIELD_CAUSE'),\n 'campaign_info': os.getenv('CAMPAIGN_INFO'),\n 'campaigns': os.getenv('CAMPAIGNS'),\n 'campaign_info_international': os.getenv('CAMPAIGN_INFO_INTERNATIONAL')\n}\n\n\ndef create():\n \"\"\"(Re)create materialized views: campaign_info_all, campaign_info,\n campaign_info_international.\n \"\"\"\n duration = Duration()\n run_sql_file('./data/sql/derived-tables/campaign_info.sql', data)\n duration.duration()\n\n\ndef refresh():\n db = Database()\n duration = Duration()\n\n # Setting statement for schema diffs of campaign_info_all\n campaign_all = \"REFRESH MATERIALIZED VIEW \" + data['campaign_info_all']\n db.query(campaign_all)\n db.query('REFRESH MATERIALIZED VIEW public.campaign_info')\n db.query('REFRESH MATERIALIZED VIEW public.campaign_info_international')\n db.disconnect()\n duration.duration()\n\n\nif __name__ == \"__create__\":\n create()\n\nif __name__ == \"__refresh__\":\n refresh()\n","sub_path":"quasar/campaign_info.py","file_name":"campaign_info.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"579786447","text":"\n\nfrom 
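In campaign_info.py just above, the `if __name__ == "__create__":` and `if __name__ == "__refresh__":` guards can never fire: Python sets __name__ to "__main__" when a file runs as a script, or to the module path on import. A sketch of a working entry point appended to that same module (so create() and refresh() are in scope), dispatching on a command-line argument whose name is an assumption:

import sys

if __name__ == "__main__":
    # e.g. `python -m quasar.campaign_info refresh`; defaults to create()
    if len(sys.argv) > 1 and sys.argv[1] == "refresh":
        refresh()
    else:
        create()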
xai.brain.wordbase.nouns._port import _PORT\n\n#class header\nclass _PORTED(_PORT, ):\n\tdef __init__(self,): \n\t\t_PORT.__init__(self)\n\t\tself.name = \"PORTED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"port\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ported.py","file_name":"_ported.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"538676243","text":"# file: tf3_3.py\n# author: meikerwang\n# forward\nimport tensorflow as tf\n\n# define the input and the parameters\nx = tf.constant([[1.5, 2.0]])\nw1 = tf.Variable(tf.random_normal([2, 3], stddev=1.0, seed=1))\nw2 = tf.Variable(tf.random_normal([3, 1], stddev=1.0, seed=1))\n\n# define the forward pass: two fully-connected layers\na = tf.matmul(x, w1)\ny = tf.matmul(a, w2)\n\n# run the session\nwith tf.Session() as sess:\n    init_op = tf.global_variables_initializer()\n    sess.run(init_op)\n    print(\"y in tf3_3.py is:\", sess.run(y))\n\n\"\"\"\n[[8.6353655]]\n\"\"\"\n","sub_path":"ch01_tf/tf3_3.py","file_name":"tf3_3.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"449552210","text":"# =================================================================\n#\n# Authors: Just van den Broecke <justb4@gmail.com>\n#          Tom Kralidis <tomkralidis@gmail.com>\n#\n# Copyright (c) 2019 Just van den Broecke\n# Copyright (c) 2019 Tom Kralidis\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use,\n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
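tf3_3.py above targets TensorFlow 1.x; tf.Session, tf.random_normal, and tf.global_variables_initializer were removed in TF2. A sketch of the same two-layer forward pass under TF2's eager execution (the printed value will differ from [[8.6353655]], since TF2 changed random-seed semantics):

import tensorflow as tf

x = tf.constant([[1.5, 2.0]])
w1 = tf.Variable(tf.random.normal([2, 3], stddev=1.0, seed=1))
w2 = tf.Variable(tf.random.normal([3, 1], stddev=1.0, seed=1))

# eager execution: no session or initializer step needed
y = tf.matmul(tf.matmul(x, w1), w2)
print("y:", y.numpy())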
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n#\n# =================================================================\n\n# Needs to be run like: pytest -s test_sqlite_provider.py\n# In eclipse we need to set PYGEOAPI_CONFIG, Run>Debug Configurations>\n# (Arguments as py.test and set external variables to the correct config path)\n\nimport pytest\nfrom pygeoapi.provider.sqlite import SQLiteProvider\n\n\n@pytest.fixture()\ndef config():\n return {\n 'name': 'Sqlite',\n 'data': './tests/data/ne_110m_admin_0_countries.sqlite',\n 'id_field': 'ogc_fid',\n 'table': 'ne_110m_admin_0_countries'\n }\n\n\ndef test_query(config):\n \"\"\"Testing query for a valid JSON object with geometry\"\"\"\n\n p = SQLiteProvider(config)\n feature_collection = p.query()\n assert feature_collection.get('type', None) == 'FeatureCollection'\n features = feature_collection.get('features', None)\n assert features is not None\n feature = features[0]\n properties = feature.get('properties', None)\n assert properties is not None\n geometry = feature.get('geometry', None)\n assert geometry is not None\n\n\ndef test_get(config):\n p = SQLiteProvider(config)\n result = p.get(118)\n assert isinstance(result, dict)\n assert 'geometry' in result\n assert 'properties' in result\n assert 'id' in result\n assert 'Netherlands' in result['properties']['admin']\n","sub_path":"tests/test_sqlite_provider.py","file_name":"test_sqlite_provider.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"296847098","text":"import os\nimport sys\nimport numpy as np\n\nfrom file_navigation import *\n\nDEEPNESS=1\n\n# -- --\n\ndef read_optimum(fname):\n fi = open(fname)\n nfacs_rep = None\n time = None\n k = None\n for lin in fi:\n lin = lin.strip()\n lin = lin.replace(\" \",\" \")\n if len(lin)==0: continue\n if \"#\" in lin:\n if \"#runtime:\" in lin:\n time = int(lin.replace(\"#runtime:\",\"\"))\n if \"#nfacs:\" in lin:\n nfacs_rep = int(lin.replace(\"#nfacs:\",\"\"))\n else:\n k = lin.split()\n fi.close()\n assert(k)\n assigns = [int(x) for x in k[:-1]]\n if not (nfacs_rep is None or len(set(assigns))<=nfacs_rep):\n print(fname)\n print(nfacs_rep)\n assert(False)\n value = float(k[-1])\n return assigns,value,time\n\ndef read_problem(fname):\n fi = open(fname)\n mode = 0\n for lin in fi:\n lin=lin.strip()\n if \"FILE\" in lin:\n mode = 1\n continue\n if mode==0:\n n,m = [int(x) for x in lin.split(' ')]\n p = 0\n else:\n n,m,p = [int(x) for x in lin.split(' ')]\n break\n fi.close()\n return n,m,p\n\ndef read_solution(fname):\n if not os.path.isfile(fname):\n return None,None,None,None,None\n # ---\n fi = open(fname)\n assigns = None\n value = None\n time = None\n iters = None\n finalsols = None\n for lin in fi:\n if \"# Time:\" in lin:\n time = float(lin.split(\" \")[-1])\n if \"# Iterations:\" in lin:\n iters = float(lin.split(\" \")[-1])\n if \"# Final_solutions:\" in lin:\n finalsols = float(lin.split(\" \")[-1])\n if (assigns is None) and (\"Assigns:\" in lin):\n assigns = [int(x) for x in lin.split()[1:]]\n if (value is None) and (\"Value:\" in lin):\n value = -float(lin.split()[1])\n if (assigns is not None) and (value is not None):\n break\n fi.close()\n assert(assigns is not None)\n assert(value is not None)\n assert(time is 
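read_optimum below implies the .opt file layout: '#'-prefixed metadata lines (#runtime:, #nfacs:) plus one data line of integer facility assignments followed by the objective value. A self-contained round trip through that inferred format; the sample numbers are made up:

import os, tempfile

sample = "#runtime: 12\n#nfacs: 2\n0 0 1 1 0 173.25\n"
fd, path = tempfile.mkstemp(suffix=".opt")
with os.fdopen(fd, "w") as f:
    f.write(sample)

# same parse as read_optimum: skip '#' lines, split the data line,
# last token is the objective value, the rest are assignments
with open(path) as f:
    data = [ln.split() for ln in f if ln.strip() and "#" not in ln][0]
assigns, value = [int(x) for x in data[:-1]], float(data[-1])
print(assigns, value)  # [0, 0, 1, 1, 0] 173.25
os.remove(path)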
not None)\n #\n return assigns,value,time,iters,finalsols\n\ndef is_optimum(sol,opt,can_be_better):\n opt_assi = opt[0]\n opt_val = opt[1]\n sol_assi = sol[0]\n sol_val = sol[1]\n if sol_assi is None:\n assert(sol_val is None)\n return 0\n sol_facts = set(sol_assi)\n opt_facts = set(opt_assi)\n if sol_facts == opt_facts: return 1\n if sol_val <= opt_val:\n if sol_val<=opt_val-0.01:\n assert(can_be_better)\n return 2\n else:\n return 1\n return 0\n\nif __name__ == '__main__':\n\n # -- parse input --\n if len(sys.argv)!=3:\n print(\"usage: python %s \"%(sys.argv[0]))\n print(\"e.g.: python %s %s %s\"%(sys.argv[0],'splp','res/dc_norm_s_200_400/splp'))\n sys.exit(1)\n\n prob_dir = sys.argv[1]\n sols_dir = sys.argv[2]\n\n\n problems = {}\n problems['opt'] = get_dirs(prob_dir,\".opt\")\n problems['bub'] = get_dirs(prob_dir,\".bub\")\n\n # Grand totals\n total_perces = []\n total_times = []\n total_times_ls = []\n total_iters = []\n total_finals = []\n total_optis = 0\n\n summary = {}\n for kind in ('opt','bub'):\n summary[kind] = []\n print(\"=\"*80)\n print(\"> PROBLEMS: \"+kind.upper())\n print(\"=\"*80)\n prob_names = sorted(list(problems[kind]))\n # Identify the problem groups (last folders):\n\n get_group = lambda x: tuple(x[:-1] if len(x)<=DEEPNESS else x[:DEEPNESS])\n group_names = sorted(list(set([get_group(x) for x in prob_names])))\n\n # --- Table\n table = []\n # ---\n\n if kind=='bub':\n # Metrics only for .bub\n bub_perces = []\n bub_times = []\n bub_times_ls = []\n bub_iters = []\n bub_finals = []\n bub_optis = 0\n bub_betters = 0\n\n for group in group_names:\n group_name = '/'.join(group)\n if group_name=='kmedian': continue #NOTE: hardcoded exlcusion of kmedian\n\n # Find the specific problems for this group:\n group_prob_names = [x for x in prob_names if get_group(x)==group]\n\n strbases = {}\n nfacs = []\n nclis = []\n opt_nfacs = []\n ps = []\n opt_times = []\n\n\n for prob in group_prob_names:\n joined = os.path.join(*prob)\n opt_fname = os.path.join(prob_dir,joined)\n prob_fname = opt_fname.replace('.'+kind,'')\n if not os.path.isfile(prob_fname):\n print(\"ERROR: %s does not exists.\"%prob_fname)\n sys.exit(1)\n n,m,p = read_problem(prob_fname)\n nfacs.append(n)\n nclis.append(m)\n ps.append(p)\n opt_data = read_optimum(opt_fname)\n # --- Maximums\n n_opt_facilities = len(set(opt_data[0]))\n n_clients = len(opt_data[0])\n opt_nfacs.append(n_opt_facilities)\n if opt_data[2] is not None:\n opt_times.append(opt_data[2])\n strbases[prob_fname] = \"%-35s n:%5d on:%5d ov:%9.3f\"%(\n joined,n_clients,n_opt_facilities,opt_data[1])\n\n # Print problem and opt solutions description\n print(\"-\"*20)\n min_p = np.min(ps)\n max_p = np.max(ps)\n str_p = str(min_p) if min_p==max_p else \"%d-%d\"%(min_p,max_p)\n min_nfacs = np.min(nfacs)\n max_nfacs = np.max(nfacs)\n str_nfacs = str(min_nfacs) if min_nfacs==max_nfacs else \"%d-%d\"%(min_nfacs,max_nfacs)\n min_nclis = np.min(nclis)\n max_nclis = np.max(nclis)\n str_nclis = str(min_nclis) if min_nclis==max_nclis else \"%d-%d\"%(min_nclis,max_nclis)\n min_opt_nfacs = np.min(opt_nfacs)\n max_opt_nfacs = np.max(opt_nfacs)\n str_opt_nfacs = str(min_opt_nfacs) if min_opt_nfacs==max_opt_nfacs else \"%d-%d\"%(min_opt_nfacs,max_opt_nfacs)\n opt_time_mean = float('inf') if len(opt_times)==0 else np.mean(opt_times)\n opt_time_std = float('inf') if len(opt_times)==0 else np.std(opt_times)\n\n print(\"%-35s (%d probs) n:%s m:%s p:%s on:%s otime: %.2f+-%.2f\"%(group_name,\n len(group_prob_names),\n str_nfacs,\n str_nclis,\n str_p,\n str_opt_nfacs,\n 
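is_optimum above only counts a solution as strictly better when it beats the optimum by a fixed 0.01 margin. For objectives of very different magnitudes a relative tolerance is safer; a sketch using math.isclose, with an illustrative threshold:

import math

def classify(sol_val, opt_val, rel_tol=1e-6):
    # "optimal" when equal up to rounding noise; otherwise compare directly
    # (a lower objective value is better, as in is_optimum)
    if math.isclose(sol_val, opt_val, rel_tol=rel_tol):
        return "optimal"
    return "better" if sol_val < opt_val else "worse"

print(classify(173.2500001, 173.25))  # optimal
print(classify(170.0, 173.25))        # better
print(classify(180.0, 173.25))        # worse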
opt_time_mean,opt_time_std))\n\n for mode in ('','_ls'):\n strings = []\n perces = []\n perces_all = []\n times = []\n iters = []\n finals = []\n optis = 0\n betters = 0\n nones = 0\n for prob in group_prob_names:\n joined = os.path.join(*prob)\n # --- Get the solution\n sol_fname = os.path.join(sols_dir,joined)\n sol_fname = sol_fname[:-4]\n sol_data = read_solution(sol_fname+mode)\n opt_fname = os.path.join(prob_dir,joined)\n opt_data = read_optimum(opt_fname)\n prob_fname = opt_fname.replace('.'+kind,'')\n # --- Check for optimality\n perce = 0\n show = False\n if sol_data[0] is None:\n nones += 1\n else:\n opt = is_optimum(sol_data,opt_data,kind=='bub')\n if opt==1:\n optis += 1\n elif opt==2:\n betters += 1\n else:\n show = True\n perce = 0 if opt_data[1] is None else sol_data[1]/opt_data[1]\n times.append(sol_data[2])\n iters.append(sol_data[3])\n finals.append(sol_data[4])\n if show:\n if mode=='_ls':\n strings.append(strbases[prob_fname]+\" v:%9.3f %8.4f\"%(\n sol_data[1],perce))\n if (show or kind=='bub') and perce!=0:\n perces.append(perce)\n if perce!=0:\n perces_all.append(perce)\n else:\n print(\"\\033[0;31m WARNING: perces=0!!!! \\033[0m\")\n # For the all summary:\n if kind=='bub':\n if mode=='_ls':\n bub_perces += perces_all\n bub_times_ls += times\n bub_iters += iters\n bub_finals += finals\n bub_optis += optis\n bub_betters += betters\n else:\n bub_times += times\n\n if mode=='_ls':\n total_perces += perces_all\n total_times_ls += times\n total_iters += iters\n total_finals += finals\n total_optis += optis+betters\n else:\n total_times += times\n # Print solutions description\n if nones==0:\n red = ''\n noc = ''\n else:\n red = '\\033[0;31m'\n noc = '\\033[0m'\n perce = \" - \" if len(perces)==0 else \"%9.6f\"%(np.mean(perces))\n time = \" - \" if len(times)==0 else \"%8.3f+-%-8.3f\"%(np.mean(times),np.std(times))\n time2 = \" - \" if len(times)==0 else \"%8.3f-%-8.3f\"%(np.min(times),np.max(times))\n iter = \" - \"if len(iters)==0 else \"%3d-%-3d\"%(np.min(iters),np.max(iters))\n final = \" - \"if len(finals)==0 else \"%3d-%-3d\"%(np.min(finals),np.max(finals))\n print(\"%-35s opt:%3d/%-3d %snons:%3d/%-3d%s perce:%s t:%s it:%s fs:%s\"%(\n sols_dir+('' if mode=='' else \"(\"+mode+\")\"),\n optis,len(group_prob_names),\n red,nones,len(group_prob_names),noc,\n perce,time,iter,final))\n for stri in strings:\n print(stri)\n # --- Table\n n_probs = len(group_prob_names)\n if nones= ceil(low)`. Its shape must broadcast with the shape of samples\n from `distribution` and must not result in additional batch dimensions\n after broadcasting.\n high: Highest possible quantized value, such that samples are\n `y <= floor(high)`. 
Its shape must broadcast with the shape of samples\n from `distribution` and must not result in additional batch dimensions\n after broadcasting.\n \"\"\"\n self._dist = conversion.as_distribution(distribution)\n if self._dist.event_shape:\n raise ValueError(f'The base distribution must be univariate, but its '\n f'`event_shape` is {self._dist.event_shape}.')\n dtype = self._dist.dtype\n if low is None:\n self._low = None\n else:\n self._low = jnp.asarray(jnp.ceil(low), dtype=dtype)\n if len(self._low.shape) > len(self._dist.batch_shape):\n raise ValueError('The parameter `low` must not result in additional '\n 'batch dimensions.')\n if high is None:\n self._high = None\n else:\n self._high = jnp.asarray(jnp.floor(high), dtype=dtype)\n if len(self._high.shape) > len(self._dist.batch_shape):\n raise ValueError('The parameter `high` must not result in additional '\n 'batch dimensions.')\n super().__init__()\n\n @property\n def distribution(self) -> DistributionT:\n \"\"\"Base distribution `p(x)`.\"\"\"\n return self._dist\n\n @property\n def low(self) -> Optional[Array]:\n \"\"\"Lowest value that quantization returns.\"\"\"\n if self._low is None:\n return None\n return jnp.broadcast_to(self._low, self.batch_shape + self.event_shape)\n\n @property\n def high(self) -> Optional[Array]:\n \"\"\"Highest value that quantization returns.\"\"\"\n if self._high is None:\n return None\n return jnp.broadcast_to(self._high, self.batch_shape + self.event_shape)\n\n @property\n def event_shape(self) -> Tuple[int, ...]:\n \"\"\"Shape of event of distribution samples.\"\"\"\n return self.distribution.event_shape\n\n @property\n def batch_shape(self) -> Tuple[int, ...]:\n \"\"\"Shape of batch of distribution samples.\"\"\"\n return self.distribution.batch_shape\n\n def _sample_n(self, key: PRNGKey, n: int) -> Array:\n \"\"\"See `Distribution._sample_n`.\"\"\"\n samples = self.distribution.sample(seed=key, sample_shape=n)\n samples = jnp.ceil(samples)\n\n # Apply overflow and underflow conditions.\n if self.low is not None:\n samples = jnp.where(samples < self.low, self.low, samples)\n if self.high is not None:\n samples = jnp.where(samples > self.high, self.high, samples)\n\n return samples\n\n def _sample_n_and_log_prob(self, key: PRNGKey, n: int) -> Tuple[Array, Array]:\n \"\"\"See `Distribution._sample_n_and_log_prob`.\"\"\"\n samples = self._sample_n(key, n)\n log_cdf = self.distribution.log_cdf(samples)\n log_cdf_m1 = self.distribution.log_cdf(samples - 1.)\n if self.high is not None:\n log_cdf = jnp.where(samples < self.high, log_cdf, 0.)\n if self.low is not None:\n log_cdf_m1 = jnp.where(samples - 1. < self.low, -jnp.inf, log_cdf_m1)\n log_probs = math.log_expbig_minus_expsmall(log_cdf, log_cdf_m1)\n return samples, log_probs\n\n def log_prob(self, value: Array) -> Array:\n \"\"\"Calculates the log probability of an event.\n\n This implementation differs slightly from the one in TFP, as it returns\n `-jnp.inf` on non-integer values instead of returning the log prob of the\n floor of the input. In addition, this implementation also returns `-jnp.inf`\n on inputs that are outside the support of the distribution (as opposed to\n `nan`, like TFP does). 
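A numeric check of the CDF-difference probabilities that log_prob/prob below compute, with the overflow/underflow mass folded into the bounds exactly as log_cdf/cdf clamp it: P(Y = k) = F(k) - F(k-1), P(Y = low) = F(low), P(Y = high) = 1 - F(high - 1). SciPy's normal CDF stands in for the base distribution:

import numpy as np
from scipy.stats import norm

low, high = -3.0, 3.0
ks = np.arange(low, high + 1.0)   # integer support after quantization
upper = norm.cdf(ks)
lower = norm.cdf(ks - 1.0)
upper[-1] = 1.0   # cdf clamps to 1 at y >= high
lower[0] = 0.0    # cdf clamps to 0 below low
pmf = upper - lower
print(pmf.sum())  # ~1.0: quantization plus clipping conserves probability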
On other integer values, both implementations are\n identical.\n\n Args:\n value: An event.\n\n Returns:\n The log probability log P(value).\n \"\"\"\n is_integer = jnp.where(value > jnp.floor(value), False, True)\n log_cdf = self.log_cdf(value)\n log_cdf_m1 = self.log_cdf(value - 1.)\n log_probs = math.log_expbig_minus_expsmall(log_cdf, log_cdf_m1)\n return jnp.where(jnp.isinf(log_cdf), -jnp.inf,\n jnp.where(is_integer, log_probs, -jnp.inf))\n\n def prob(self, value: Array) -> Array:\n \"\"\"Calculates the probability of an event.\n\n This implementation differs slightly from the one in TFP, as it returns 0\n on non-integer values instead of returning the prob of the floor of the\n input. It is identical for integer values.\n\n Args:\n value: An event.\n\n Returns:\n The probability P(value).\n \"\"\"\n is_integer = jnp.where(value > jnp.floor(value), False, True)\n cdf = self.cdf(value)\n cdf_m1 = self.cdf(value - 1.)\n probs = cdf - cdf_m1\n return jnp.where(is_integer, probs, 0.)\n\n def log_cdf(self, value: Array) -> Array:\n \"\"\"See `Distribution.log_cdf`.\"\"\"\n y = jnp.floor(value)\n result = self.distribution.log_cdf(y)\n if self.low is not None:\n result = jnp.where(y < self.low, -jnp.inf, result)\n if self.high is not None:\n result = jnp.where(y < self.high, result, 0.)\n return result\n\n def cdf(self, value: Array) -> Array:\n \"\"\"See `Distribution.cdf`.\"\"\"\n y = jnp.floor(value)\n result = self.distribution.cdf(y)\n if self.low is not None:\n result = jnp.where(y < self.low, 0., result)\n if self.high is not None:\n result = jnp.where(y < self.high, result, 1.)\n return result\n","sub_path":"distrax/_src/distributions/quantized.py","file_name":"quantized.py","file_ext":"py","file_size_in_byte":7347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"248764925","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom models import CraigsData\nimport urllib2\nimport unicodedata\nfrom models import *\nimport operator\nfrom django.http import HttpResponseRedirect, HttpResponse\n\ndef searchlistings(words,alllistings):\n FinishedObjList = []\n if words == \"\":\n # for x in alllistings:\n # FinishedObjList.append(x[0])\n\n return alllistings\n ObjWordsSpotedDict = {}\n\n words = unicodedata.normalize('NFKD', words).encode('ascii','ignore')\n words = str.split(words)\n\n # listing objects\n for x in alllistings:\n matches = 0\n title = x.title\n title = unicodedata.normalize('NFKD', title).encode('ascii','ignore')\n TitleListOfWords = str.split(title)\n # eachwoord in title\n for xTitleListOfWords in TitleListOfWords:\n # each inputed word\n for xwords in words:\n xwords = xwords.lower()\n xTitleListOfWords = xTitleListOfWords.lower()\n if xwords == xTitleListOfWords:\n matches = matches+1\n if(matches!=0):\n ObjWordsSpotedDict[x] = matches\n\n # SortedList = sorted(ObjWordsSpotedDict,key=lambda x: x[1])\n SortedList = sorted(ObjWordsSpotedDict.items(), key=operator.itemgetter(1))\n SortedList.reverse()\n\n for x in SortedList:\n FinishedObjList.append(x[0])\n\n return FinishedObjList\n\n\n\n\n\n\n","sub_path":"project/names/searchlistings.py","file_name":"searchlistings.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"642960418","text":"import numpy as np\nimport tensorflow as tf\n\ndef random_rotation(so3):\n angle_x = np.random.uniform() * 2 * np.pi if so3 else 0\n angle_y = np.random.uniform() * 2 * np.pi\n angle_z 
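searchlistings.py just above ranks listings by counting query words that occur in each title. A compact equivalent of that scoring loop; Listing is a stand-in namedtuple for the CraigsData model:

from collections import namedtuple

Listing = namedtuple("Listing", "title")

def rank_listings(query, listings):
    words = [w.lower() for w in query.split()]
    scored = []
    for item in listings:
        title_words = item.title.lower().split()
        matches = sum(title_words.count(w) for w in words)
        if matches:
            scored.append((matches, item))
    # highest match count first, as in the original sort + reverse
    return [item for matches, item in
            sorted(scored, key=lambda pair: pair[0], reverse=True)]

hits = rank_listings("red bike", [Listing("Red bike for sale"), Listing("Couch")])
print([l.title for l in hits])  # ['Red bike for sale']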
= np.random.uniform() * 2 * np.pi if so3 else 0\n\n Rx = np.array([[1, 0, 0],\n [0, np.cos(angle_x), -np.sin(angle_x)],\n [0, np.sin(angle_x), np.cos(angle_x)]])\n Ry = np.array([[np.cos(angle_y), 0, np.sin(angle_y)],\n [0, 1, 0],\n [-np.sin(angle_y), 0, np.cos(angle_y)]])\n Rz = np.array([[np.cos(angle_z), -np.sin(angle_z), 0],\n [np.sin(angle_z), np.cos(angle_z), 0],\n [0, 0, 1]])\n R = np.dot(Rz, np.dot(Ry, Rx))\n return R\n\ndef euldeg2rotm(x,y,z):\n angle_x = x / 180 * np.pi\n angle_y = y / 180 * np.pi\n angle_z = z / 180 * np.pi\n\n Rx = np.array([[1, 0, 0],\n [0, np.cos(angle_x), -np.sin(angle_x)],\n [0, np.sin(angle_x), np.cos(angle_x)]])\n Ry = np.array([[np.cos(angle_y), 0, np.sin(angle_y)],\n [0, 1, 0],\n [-np.sin(angle_y), 0, np.cos(angle_y)]])\n Rz = np.array([[np.cos(angle_z), -np.sin(angle_z), 0],\n [np.sin(angle_z), np.cos(angle_z), 0],\n [0, 0, 1]])\n R = np.dot(Rz, np.dot(Ry, Rx))\n return R\n\ndef rotm2quat(rotm):\n # http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/\n\n m00 = rotm[0,0]\n m01 = rotm[0,1]\n m02 = rotm[0,2]\n m10 = rotm[1,0]\n m11 = rotm[1,1]\n m12 = rotm[1,2]\n m20 = rotm[2,0]\n m21 = rotm[2,1]\n m22 = rotm[2,2]\n\n tr = m00 + m11 + m22\n\n if (tr > 0): \n S = ((tr + 1.0) ** 0.5) * 2\n qw = 0.25 * S\n qx = (m21 - m12) / S\n qy = (m02 - m20) / S\n qz = (m10 - m01) / S\n elif (m00 > m11) and (m00 > m22):\n S = ((1.0 + m00 - m11 - m22) ** 0.5) * 2\n qw = (m21 - m12) / S\n qx = 0.25 * S\n qy = (m01 + m10) / S\n qz = (m02 + m20) / S\n elif (m11 > m22):\n S = ((1.0 + m11 - m00 - m22) ** 0.5) * 2\n qw = (m02 - m20) / S\n qx = (m01 + m10) / S\n qy = 0.25 * S\n qz = (m12 + m21) / S\n else:\n S = ((1.0 + m22 - m00 - m11) ** 0.5) * 2\n qw = (m10 - m01) / S\n qx = (m02 - m20) / S\n qy = (m12 + m21) / S\n qz = 0.25 * S\n\n quat = np.zeros(4)\n quat[0] = qw\n quat[1] = qx\n quat[2] = qy\n quat[3] = qz\n\n return quat\n\ndef quat2rotm(quat):\n # https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n # assume input is [w, x, y, z]\n \n # normalize first\n quat = quat / np.linalg.norm(quat)\n\n qw = quat[0]\n qx = quat[1]\n qy = quat[2]\n qz = quat[3]\n rotmat = np.zeros((3,3))\n rotmat[0,0] = 1 - 2*qy**2 - 2*qz**2\n rotmat[0,1] = 2*qx*qy - 2*qz*qw\n rotmat[0,2] = 2*qx*qz + 2*qy*qw\n \n rotmat[1,0] = 2*qx*qy + 2*qz*qw\n rotmat[1,1] = 1 - 2*qx**2 - 2*qz**2\n rotmat[1,2] = 2*qy*qz - 2*qx*qw\n\n rotmat[2,0] = 2*qx*qz - 2*qy*qw\n rotmat[2,1] = 2*qy*qz + 2*qx*qw\n rotmat[2,2] = 1 - 2*qx**2 - 2*qy**2\n\n return rotmat\n\ndef euldeg2quat(x,y,z):\n # https://www.euclideanspace.com/maths/geometry/rotations/conversions/eulerToQuaternion/index.htm\n angle_x = x / 180 * np.pi\n angle_y = y / 180 * np.pi\n angle_z = z / 180 * np.pi\n\n c1 = np.cos(angle_y / 2)\n c2 = np.cos(angle_z / 2)\n c3 = np.cos(angle_x / 2)\n s1 = np.sin(angle_y / 2)\n s2 = np.sin(angle_z / 2)\n s3 = np.sin(angle_x / 2)\n\n qw = c1 * c2 * c3 + s1 * s2 * s3\n qx = c1 * c2 * s3 - s1 * s2 * c3\n qy = s1 * c2 * c3 + c1 * s2 * s3\n qz = c1 * s2 * c3 - s1 * c2 * s3\n\n quat = np.zeros(4)\n quat[0] = qw\n quat[1] = qx\n quat[2] = qy\n quat[3] = qz\n\n return quat\n\ndef transform_tf(points, transform):\n # in: B x N x 3, B x 4 x 4\n # out: B x N x 3\n\n R = transform[:, 0:3, 0:3]\n t = tf.tile(tf.expand_dims(transform[:, 0:3, 3], axis=1), [1, tf.shape(points)[1], 1])\n\n points_transform = tf.linalg.matmul(points, R, transpose_b=True)\n points_transform = points_transform + t\n\n return points_transform\n\ndef quat2rotm_tf(quat):\n # in: B 
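A sanity check for the Euler/quaternion helpers above: a proper rotation matrix must satisfy R @ R.T = I and det(R) = 1. This rebuilds the same Rz @ Ry @ Rx construction standalone so the check is self-contained:

import numpy as np

def rotm_from_euler_deg(x, y, z):
    ax, ay, az = np.deg2rad([x, y, z])
    Rx = np.array([[1, 0, 0], [0, np.cos(ax), -np.sin(ax)], [0, np.sin(ax), np.cos(ax)]])
    Ry = np.array([[np.cos(ay), 0, np.sin(ay)], [0, 1, 0], [-np.sin(ay), 0, np.cos(ay)]])
    Rz = np.array([[np.cos(az), -np.sin(az), 0], [np.sin(az), np.cos(az), 0], [0, 0, 1]])
    return Rz @ Ry @ Rx

R = rotm_from_euler_deg(30.0, -45.0, 60.0)
assert np.allclose(R @ R.T, np.eye(3))     # orthogonal
assert np.isclose(np.linalg.det(R), 1.0)   # orientation-preserving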
x 4\n # out: B x 3 x 3\n\n # https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm\n # assume input is [w, x, y, z]\n \n # normalize first\n quat_norm = tf.tile(tf.norm(quat, axis=1, keepdims=True),[1, 4])\n quat = quat / quat_norm\n\n qw = quat[:, 0]\n qx = quat[:, 1]\n qy = quat[:, 2]\n qz = quat[:, 3]\n \n rotmat = tf.zeros((tf.shape(quat)[0],3,3))\n\n rotmat00 = 1 - 2*qy**2 - 2*qz**2\n rotmat01 = 2*qx*qy - 2*qz*qw\n rotmat02 = 2*qx*qz + 2*qy*qw\n rotmat0 = tf.stack([rotmat00, rotmat01, rotmat02], axis=1)\n\n rotmat10= 2*qx*qy + 2*qz*qw\n rotmat11 = 1 - 2*qx**2 - 2*qz**2\n rotmat12 = 2*qy*qz - 2*qx*qw\n rotmat1 = tf.stack([rotmat10, rotmat11, rotmat12], axis=1)\n\n rotmat20 = 2*qx*qz - 2*qy*qw\n rotmat21 = 2*qy*qz + 2*qx*qw\n rotmat22 = 1 - 2*qx**2 - 2*qy**2\n rotmat2 = tf.stack([rotmat20, rotmat21, rotmat22], axis=1)\n\n rotmat = tf.stack([rotmat0, rotmat1, rotmat2], axis=1)\n\n return rotmat\n\ndef mat6d2rotm_tf(mat6d):\n # in: B x 6\n # out: B x 3 x 3\n\n # https://arxiv.org/pdf/1812.07035.pdf\n # see eq. 15/16 in supp. mat.\n\n def N(x):\n x_norm = tf.tile(tf.norm(x, axis=1, keepdims=True), [1, x.shape[1]])\n return x / x_norm\n\n a1 = mat6d[:, 0:3]\n a2 = mat6d[:, 3:6]\n b1 = N(a1)\n dotprod = tf.reduce_sum(tf.multiply(b1, a2), 1, keepdims=True)\n b2 = N(a2 - dotprod * b1)\n b3 = tf.cross(b1, b2)\n \n rotmat = tf.stack([b1, b2, b3], axis=2)\n\n return rotmat\n\ndef rotm2mat6d_tf(rotm):\n # in: B x 3 x 3\n # out: B x 6\n\n # https://arxiv.org/pdf/1812.07035.pdf\n # see eq. 14 in supp. mat.\n\n mat6d = rotm[:, :, 0:2]\n mat6d = tf.transpose(mat6d, perm=[0, 2, 1])\n mat6d = tf.reshape(mat6d, [mat6d.shape[0], 6])\n\n return mat6d","sub_path":"transform_util.py","file_name":"transform_util.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341821960","text":"import argparse\nimport copy\nimport distutils.cmd\nimport distutils.log\nimport datetime\nimport os\nimport subprocess\nimport sys\n\nfrom .trade_time import Papa\n\nparser = argparse.ArgumentParser(description=\"bee bee bee~\")\nparser.add_argument('-tt', '--tradetime', help='更新交易日历/Update transaction calendar;'\n 'Example: -tt 2019 or -tt 2004-2020 or -tt now')\n\nparser.add_argument(\"-auto\", '--generate', help=\"对于linux自动生成中文环境\")\n\n\ndef tradetime_handle(year):\n if \"-\" in year:\n y = year.split(\"-\")\n elif year == 'now':\n y = [2004, datetime.datetime.now().year]\n else:\n y = [2004, year]\n Papa.run(y)\n Papa.write()\n\n\ndef update_locale():\n with open(\"/etc/locale.gen\", \"a+\") as f:\n code_lines = [\n \"zh_CN.GB18030 GB18030\",\n \"en_US.UTF-8 UTF-8\",\n \"zh_CN.UTF-8 UTF-8\"\n ]\n for x in code_lines:\n f.write(x + \"\\n\")\n os.system(\"locale-gen\")\n\n\ndef execute():\n if len(sys.argv) <= 1:\n print('[*]Tip: ctpbee -h view help')\n sys.exit(0)\n args = parser.parse_args()\n # argv_value\n year = args.tradetime\n # handle\n if year:\n tradetime_handle(year)\n return\n\n auto = args.generate\n if auto == \"generate\":\n update_locale()\n return\n\n\n","sub_path":"ctpbee/cmdline.py","file_name":"cmdline.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"307410936","text":"\"\"\"\nCL-RRT Complete Edition\n\nAlgorithm Ref: Chance Constrained RRT fot Probabilistic Robustness to Environment Uncertainty\n\nset all angle as rad\nno control noise\n\"\"\"\n\nimport math\nimport 
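tradetime_handle in cmdline.py above accepts "2019", "2004-2020", or "now" (the Chinese argparse help reads "update the trading calendar" and "auto-generate the Chinese locale environment for Linux"). A standalone restatement of that argument grammar with the 2004 default start year made explicit:

import datetime

def parse_year_range(arg, first_year=2004):
    # "2004-2020" -> [2004, 2020]; "now" -> [2004, current year];
    # "2019" -> [2004, 2019], matching tradetime_handle's branches
    if "-" in arg:
        start, end = (int(x) for x in arg.split("-"))
    elif arg == "now":
        start, end = first_year, datetime.datetime.now().year
    else:
        start, end = first_year, int(arg)
    return [start, end]

assert parse_year_range("2004-2020") == [2004, 2020]
assert parse_year_range("2019") == [2004, 2019]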
os\nimport random\nimport sys\nimport time\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\nimport numpy as np\nfrom scipy.special import erf, erfinv\nfrom cc_rrt import CCRRT, Vehicle, obstacle_uncertainty_fusion, draw_vehicle, draw_carsize_of_final_path\n\nclass CLRRT(CCRRT):\n \"\"\"\n close-loop RRT\n \"\"\"\n def __init__(self, car, start, goal, obstacle_list, rand_area):\n super().__init__(car, start, goal, obstacle_list, rand_area)\n \n self.max_iter = 300\n self.max_n_path = 80 # save no more than n_path feasible path to choose\n self.n_path_when_change_strategy = 25\n self.max_n_node = 2500 # save no more than max_node nodes on tree\n\n self.nearest_node_step = 8 # get nodes to do tree expanding, used in get_nearest_node_index\n self.n_nearest = 15 # get n nearest nodes, used in get_nearest_node_index\n self.steer_back_step = 8 # used after find a path and try connect to goal after steering\n print(\"Begin CL-RRT\")\n \n def steer(self, from_node, to_node):\n \"\"\"\n steer with chance constrain checking\n begin: from_node\n return path = [inter_node, ..., inter_node, to_node(if feasible)]\n \"\"\"\n # reference v & w\n dis, angle = self.calc_distance_and_angle(from_node, to_node)\n angle = self.angle_wrap(angle - from_node.yaw)\n\n if abs(angle) > math.pi / 3.0:\n self.P[0,0] = 0.05\n self.D[0,0] = -0.10\n self.min_vehicle_speed = 1.0\n self.max_vehicle_turn_rate = math.pi\n elif abs(angle) > math.pi / 6.0:\n self.P[0,0] = 0.25\n self.D[0,0] = -0.5\n self.min_vehicle_speed = 6.5\n self.max_vehicle_turn_rate = math.pi\n else:\n self.P[0,0] = 1.0\n self.D[0,0] = -2.0\n self.min_vehicle_speed = 13.0\n self.max_vehicle_turn_rate = math.pi / 2.0\n\n u_p = self.P.dot(np.array([[dis],[angle]]))\n u_d = np.zeros((2,1))\n u = u_p + u_d\n u[0,0] = max(self.min_vehicle_speed, min(u[0,0], self.max_vehicle_speed))\n if abs(u[1,0]) > self.max_vehicle_turn_rate:\n u[1,0] = np.sign(angle) * self.max_vehicle_turn_rate\n \n # prev node: deep copy from the from_node\n prev = self.Node(from_node.x, from_node.y, from_node.yaw) # local init\n prev.conv = from_node.conv\n prev.cost = from_node.cost\n prev.parent = from_node.parent\n prev.time = from_node.time\n prev.cc = from_node.cc\n prev.cost_lb = from_node.cost_lb\n prev.cost_ub = from_node.cost_ub\n\n prev_dis = dis\n prev_angle = angle\n\n J1 = np.diag([1.0, 1.0, 1.0])\n\n J2 = np.zeros((3,2))\n J2[0,0] = self.delta_time * math.cos(prev.yaw)\n J2[1,0] = self.delta_time * math.sin(prev.yaw)\n J2[2,1] = self.delta_time\n\n # get feasible node from N_near->N_sample\n feasible_node_list = []\n n_step = 0\n # feaisble_to_end = True\n while self.calc_distance(prev, to_node) > self.dis_threshold and n_step < self.max_steer_step:\n pose = J1.dot(np.array([[prev.x], [prev.y], [prev.yaw]])) + J2.dot(u)\n inter_node = self.Node(pose[0].item(), pose[1].item(), pose[2].item())\n inter_node.yaw = self.angle_wrap(inter_node.yaw)\n inter_node.parent = prev\n inter_node.conv = J1.dot(prev.conv).dot(J1.transpose()) + \\\n J2.dot(self.sigma_control).dot(J2.transpose()) + \\\n self.sigma_pose\n inter_node.cc = self.get_chance_constrain(inter_node)\n if self.is_feasible(inter_node) and self.safe_steer(inter_node):\n inter_node.time = prev.time + self.delta_time\n inter_node.cost = self.get_cost(inter_node.time, inter_node.cc)\n inter_node.cost_lb = self.get_cost_lb(inter_node)\n feasible_node_list.append(inter_node)\n prev = inter_node # inter_node will point to the next inter_node\n \n # update J1 J2\n J2[0,0] = self.delta_time * 
math.cos(prev.yaw)\n J2[1,0] = self.delta_time * math.sin(prev.yaw)\n J2[2,1] = self.delta_time\n\n dis, angle = self.calc_distance_and_angle(prev, to_node)\n angle = self.angle_wrap(angle - prev.yaw)\n\n if abs(angle) > math.pi / 3.0:\n self.P[0,0] = 0.05\n self.D[0,0] = -0.10\n self.min_vehicle_speed = 1.0\n self.max_vehicle_turn_rate = math.pi\n elif abs(angle) > math.pi / 6.0:\n self.P[0,0] = 0.25\n self.D[0,0] = -0.5\n self.min_vehicle_speed = 6.5\n self.max_vehicle_turn_rate = math.pi\n else:\n self.P[0,0] = 1.0\n self.D[0,0] = -2.0\n self.min_vehicle_speed = 13.0\n self.max_vehicle_turn_rate = math.pi / 2.0\n\n u_p = self.P.dot(np.array([[dis],[angle]]))\n u_d = self.D.dot(np.array([[dis - prev_dis], [angle - prev_angle]]))\n u = u_p + u_d\n u[0,0] = max(self.min_vehicle_speed, min(u[0,0], self.max_vehicle_speed))\n if abs(u[1,0]) > self.max_vehicle_turn_rate:\n u[1,0] = np.sign(angle) * self.max_vehicle_turn_rate\n prev_dis = dis\n prev_angle = angle\n n_step += 1\n else:\n # feaisble_to_end = False\n break\n\n return feasible_node_list\n\n def backpropogation(self, node):\n \"\"\"\n backpropogation to update cost-upper-bound of a path from start to goal\n the first node is the closest to the goal\n \"\"\"\n min_child_upper_bound = math.inf # record lowest cost-upper-bound from a node to its childs\n # update upper bound\n # back from goal to root\n while node is not None and self.calc_distance(node, self.end) < self.dis_threshold: # for nodes in the goal region\n node.cost_ub = node.cost_lb\n min_child_upper_bound = min(min_child_upper_bound + self.delta_time, node.cost_ub)\n node = node.parent\n while node is not None: # for nodes out of the goal region\n node.cost_ub = min(min_child_upper_bound + self.delta_time, node.cost_ub)\n min_child_upper_bound = min(min_child_upper_bound + self.delta_time, node.cost_ub)\n node = node.parent\n \n def get_close_to_goal_index(self, node_list):\n \"\"\"\n get the node close to goal\n \"\"\"\n dlist = [self.get_expect_time_to_goal(node) for node in node_list]\n minind = dlist.index(min(dlist))\n return minind\n \n def get_cost(self, time, chance_constrain):\n return time\n \n\ndef main():\n print(\"Start \" + __file__)\n\n area = [-2, 20, -2, 20] # x-min x-max y-min y-max\n\n # Set Initial parameters\n start = [7.5, -1.0, np.deg2rad(90.0)]\n goal = [7.5, 18.0, np.deg2rad(90.0)]\n car = Vehicle()\n\n # ====Search Path with CCRRT====\n # (x, y, vehicle_length, vehicle_width, radius [-pi, pi])\n # axis = length + sigma\n obstacle_list_gt = [\n (4, 4, 3, 2, np.deg2rad(80.0)),\n # (3, 7, 3, 2, np.deg2rad(65.0)),\n (12, 8, 4, 2.5, np.deg2rad(75.0)),\n # (9, 11, 4, 2, np.deg2rad(80.0)),\n (11, 12, 5, 2.5, np.deg2rad(90.0)),\n # (9, 5, 5, 2.3, np.deg2rad(68.0)),\n (8, 14, 3, 2, np.deg2rad(75.0)),\n # (6, 12, 5, 3, np.deg2rad(80.0)),\n ]\n\n # sigam_ver, sigma_hor, sigma_radius\n obstacle_list_uncertainty = [\n (0.05, 0.02, 0.02),\n # (0.07, 0.03, 0.02),\n (0.2, 0.1, 0.06),\n # (0.18, 0.15, 0.04),\n (0.5, 0.35, 0.1),\n # (0.4, 0.2, 0.07),\n (0.4, 0.22, 0.07),\n # (0.37, 0.2, 0.06),\n ]\n\n # (x, y, long_axis, short_axis, radius [-pi, pi])\n # vehicle_length = long_axis * 2\n # vehicle_width = short_axis * 2\n obstacle_list = obstacle_uncertainty_fusion(obstacle_list_gt, obstacle_list_uncertainty)\n\n # Set Initial parameters\n cl_rrt = CLRRT(\n car=car,\n start=start,\n goal=goal,\n rand_area=area,\n obstacle_list=obstacle_list)\n # path = cl_rrt.planning(animation=False)\n cl_rrt.planning(animation=False)\n # 
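Condensed form of the PD steering law used in steer() above: proportional on the (distance, heading-error) pair, derivative on its change, then clamped to the vehicle's speed and turn-rate limits. The gains and limits here are illustrative, not the scheduled values from CLRRT:

import numpy as np

def pd_control(dis, angle, d_dis, d_angle,
               P=np.diag([1.0, 2.0]), D=np.diag([-2.0, 0.0]),
               v_min=1.0, v_max=15.0, w_max=np.pi / 2.0):
    u = P @ np.array([dis, angle]) + D @ np.array([d_dis, d_angle])
    v = np.clip(u[0], v_min, v_max)                       # forward speed
    w = np.sign(angle) * w_max if abs(u[1]) > w_max else u[1]  # turn rate
    return v, w

print(pd_control(5.0, 0.3, -0.5, -0.05))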
print(cl_rrt.check_chance_constrain(cl_rrt.end, cl_rrt.p_safe))\n # print(cl_rrt.check_chance_constrain(cl_rrt.start, cl_rrt.p_safe))\n # if path is None:\n # print(\"Cannot find path\")\n # else:\n # print(\"found path!!\")\n\n # # Draw final path\n # if show_animation:\n # cl_rrt.draw_graph()\n # plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')\n # plt.grid(True)\n # plt.pause(0.01) # Need for Mac\n # plt.show()\n cl_rrt.draw_graph()\n cl_rrt.draw_path()\n draw_vehicle(obstacle_list_gt)\n draw_carsize_of_final_path(car, cl_rrt.path)\n\n plt.figure(2)\n tmp = [node.cc for node in cl_rrt.path]\n path_min = np.min(tmp)\n path_max = np.max(tmp)\n path_avg = np.average(tmp)\n plt.scatter([node.x for node in cl_rrt.node_list], \n [node.y for node in cl_rrt.node_list], \n s=3, \n c=[node.cc for node in cl_rrt.node_list], \n cmap='jet')\n plt.plot([node.x for node in cl_rrt.path],\n [node.y for node in cl_rrt.path],\n c='k',\n label=\"path risk value:\\nmin: %.6f\\nmax: %.6f\\navg: %.6f\"%(path_min, path_max, path_avg))\n plt.colorbar()\n plt.axis([area[0], area[1], area[2], area[3]])\n plt.legend(loc='upper right')\n plt.grid(True)\n plt.show()\n # plt.savefig(\"cc-rrt-h-fun-3.png\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"v3/cl_rrt.py","file_name":"cl_rrt.py","file_ext":"py","file_size_in_byte":10096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145376686","text":"# 使用requests库获取豆瓣250\nimport requests\n\nuser_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\nheader = {'user-agent': user_agent}\nmyurl = 'https://movie.douban.com/top250'\n\nresp = requests.get(myurl, headers=header)\nprint(resp.text)\nprint(f'状态码: {resp.status_code}')\n","sub_path":"week02/practice/requests_test/requests_douban_v1.py","file_name":"requests_douban_v1.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171251760","text":"# 오늘이 학기 중인지, 주말인지 구별하는 코드\ndef is_semester(month, day):\n semester = [3, 4, 5, 9, 10, 11]\n vacation = [1, 2, 7, 8]\n if month in semester:\n return True\n elif month in vacation:\n return False\n else:\n if day < 22:\n return True\n else:\n return False","sub_path":"hyuabot/transport/shuttle/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"143463350","text":"# ------------------------------------------------------------------------------\n\"\"\"\n Configuration file of the iptyhon profile \"ipon\". \n \n Author: Rémi Mignot, 2020.\n\"\"\"\n\n# ------------------------------------------------------------------------------\n\"\"\"\n Remarks: \n - Here is only the default profile configuration file, that I modified. 
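The is_semester helper in date.py above (its Korean comment reads "code that tells whether today is during the semester or the break") encodes: months 3-5 and 9-11 are semester, 1-2 and 7-8 are vacation, and June/December count as semester only through the 21st. A table-driven restatement of the same rule:

SEMESTER = {3, 4, 5, 9, 10, 11}
VACATION = {1, 2, 7, 8}

def is_semester(month, day):
    if month in SEMESTER:
        return True
    if month in VACATION:
        return False
    return day < 22  # June and December: semester until the 21st

assert is_semester(4, 10) and not is_semester(7, 1) and is_semester(6, 21)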
\n - Web pages to see: \n https://stackoverflow.com/questions/55456507/how-to-change-ipython-shell-text-literal-color-windows-powershell \n https://stackoverflow.com/questions/14129278/how-do-i-customize-text-color-in-ipython \n https://gist.github.com/rsvp/ce3d64633713830fbea406f55adcd5e5#file-ipython_config-py-L139 \n https://gist.github.com/bobmurder/4116245 \n\"\"\" \n\n\n## =============================================================================\n## Basic color scheme that will be modified\n##\n\ncolorLabel = 'NoColor'\nc.InteractiveShell.colors = colorLabel\nc.TerminalInteractiveShell.highlighting_style = 'emacs'\n\n# 'emacs' -> Good with my changes, below,... \n# The other possibilities are: \n# paraiso-light, xcode, lovelace, borland, monokai, emacs, algol_nu, \n# algol, perldoc, autumn, colorful, default, tango, manni, igor, \n# friendly, murphy, native, fruity, vim, paraiso-dark, bw, rrt, trac,\n# pastie, arduino, vs\n# \n\n\n# ------------------------------------------------------------------------------\n# Interpreter colors\n# \nfrom pygments.token import Token, Keyword, Name, Comment, String, Error, \\\n Number, Operator, Generic, Whitespace\n\nc.TerminalInteractiveShell.highlighting_style_overrides = {\n\n # --- Others ??? ---\n \n Token.Error: \"#ff0000 nobold\",\n Token.Escape: \"#ff0000 nobold\",\n Token.Generic: \"#ff0000 nobold\",\n Token.Literal: \"#ff0000 nobold\",\n Token.Other: \"#ff0000 nobold\",\n Token.Text: \"#ff0000 nobold\",\n Token.Token: \"#ff0000 nobold\",\n Whitespace: \"#ff0000 nobold\",\n Token.Name.Namespace: \"#ff0000 nobold\",\n Comment: \"#ff0000 nobold\",\n Comment.Preproc: \"#ff0000 nobold\",\n Comment.Special: \"#ff0000 nobold\",\n Keyword: \"#ff0000 nobold\",\n Keyword.Pseudo: \"#ff0000 nobold\",\n Keyword.Type: \"#ff0000 nobold\",\n Operator: \"#ff0000 nobold\",\n Name.Function: \"#ff0000 nobold\",\n Name.Class: \"#ff0000 nobold\", \n Name.Exception: \"#ff0000 nobold\",\n Name.Variable: \"#ff0000 nobold\",\n Name.Constant: \"#ff0000 nobold\",\n Name.Label: \"#ff0000 nobold\",\n Name.Entity: \"#ff0000 nobold\",\n Name.Attribute: \"#ff0000 nobold\",\n Name.Tag: \"#ff0000 nobold\",\n Name.Decorator: \"#ff0000 nobold\", \n String.Escape: \"#ff0000 nobold\",\n String.Regex: \"#ff0000 nobold\",\n String.Symbol: \"#ff0000 nobold\",\n String.Other: \"#ff0000 nobold\",\n Generic.Heading: \"#ff0000 nobold\",\n Generic.Subheading: \"#ff0000 nobold\",\n Generic.Deleted: \"#ff0000 nobold\",\n Generic.Inserted: \"#ff0000 nobold\",\n Generic.Error: \"#ff0000 nobold\",\n Generic.Emph: \"#ff0000 nobold\",\n Generic.Strong: \"#ff0000 nobold\",\n Generic.Prompt: \"#ff0000 nobold\",\n Generic.Output: \"#ff0000 nobold\",\n Generic.Traceback: \"#ff0000 nobold\",\n Error: \"#ff0000 nobold\", \n \n # --- OK: ---\n \n Name.Builtin: \"#8080FF nobold\", \n # abs, delattr, hash, memoryview, set, all, dict, min, setattr, any, \n # dir, hex, next, slice, divmod, id, object, sorted, bin, enumerate, \n # input, oct, staticmethod, bool, eval, int, open, str, isinstance, \n # ord, sum, bytearray, filter, issubclass, pow, super, bytes, float, \n # iter, print, tuple, format, len, property, type, chr, frozenset, \n # list, range, vars, classmethod, getattr, locals, repr, zip, compile, \n # globals, map, reversed, __import__, complex, hasattr, max, round\n \n # For prompt\n Token.Prompt: \"#4242FF bold\", \n Token.PromptNum: \"#4242FF bold\", \n Token.OutPrompt: \"#4242FF bold\", \n Token.OutPromptNum: \"#4242FF bold\", \n \n String.Doc: \"#33CF33 nobold\",\n Token.String: 
\"#FF65FF nobold\", \n String: \"#FF65FF nobold\",\n String.Interpol: \"#F38472 nobold\", # \"Sum of {0} and {1} is {2}\"\n \n Token.Punctuation: \"#82BBCA nobold\", # , ; : () [] {}\n Token.Name: \"#ffffff nobold\", \n Token.Name.Function: \"#4D5CFF nobold\", # after def, \n Token.Name.Class: \"#4D5CFF nobold\", # after class, \n Name.Namespace: \"#4D5CFF nobold\",\n Token.Comment: \"#33CF33 nobold\", # Regular comments \n Number: \"#FFAE43 nobold\", # Numbers \n Token.Number: \"#FFAE43 nobold\",\n Operator.Word: \"#F8FF00 nobold\", # and, in, not, or, is. \n Token.Operator: \"#F8FF00 nobold\", # + * - % // ** = += *= == < >= & ~ etc.\n \n Token.Keyword: \"#6666FF nobold\", \n # False, True, None, as, def, except, finally, continue, break, for, \n # while, if, else, elif, with, lambda, nonlocal, del, global, yield, \n # assert, pass, raise, return. \n}\n\nfrom IPython.core import excolors, ultratb, debugger\nfrom IPython.core.excolors import exception_colors as exception_colors_orig\n\n\n# ------------------------------------------------------------------------------\n# Add new color labels here before attributing them, used below\n# \nfrom IPython.utils import coloransi\n\ncoloransi.color_templates = (\n \n # Dark colors (no bold)\n (\"Black\" , \"0;30\"),\n (\"Red\" , \"0;31\"),\n (\"Green\" , \"0;32\"),\n (\"Brown\" , \"0;33\"),\n (\"Blue\" , \"0;34\"),\n (\"Purple\" , \"0;35\"),\n (\"Cyan\" , \"0;36\"),\n (\"LightGray\" , \"0;37\"),\n\n # Light colors (bold)\n (\"DarkGray\" , \"1;30\"),\n (\"LightRed\" , \"1;31\"),\n (\"LightGreen\" , \"1;32\"),\n (\"Yellow\" , \"1;33\"),\n (\"LightBlue\" , \"1;34\"),\n (\"LightPurple\" , \"1;35\"),\n (\"LightCyan\" , \"1;36\"),\n (\"White\" , \"1;37\"),\n \n ## 256-colors\n (\"Green108\", \"38;5;108\"),\n)\n\ncoloransi.make_color_table(coloransi.TermColors)\ncoloransi.make_color_table(coloransi.InputTermColors)\n\nfor name, value in coloransi.color_templates:\n setattr(coloransi.NoColors, name, '')\n\nC = coloransi.TermColors\nIC = coloransi.InputTermColors\n\n\n# ------------------------------------------------------------------------------\n# For Exception messages (very important for the customization) \n# -> remark, do not use #RGB.\n# \ndef exception_colors(): \n ex_colors = exception_colors_orig()\n \n ex_colors.add_scheme(coloransi.ColorScheme( \n colorLabel,\n\n# -> voir pour reprendre des coleurs proche d'au-dessus pour la coloration du code... \n\n # The color to be used for the top line\n topline = C.Red, #LightRed,\n\n # The colors to be used in the traceback\n filename = C.Purple, # The name of the file\n lineno = C.Normal, # Some line numbers...\n name = C.LightRed, # The NAME.\n vName = C.LightRed, # ...\n val = C.Brown, # ??? 
\n em = C.Brown, # ???\n \n # Emphasized colors for the last frame of the traceback\n normalEm = C.Brown, # (most recent call last), File, line, in...\n filenameEm = C.Purple, # Name of files...\n linenoEm = C.Normal, # Some line numbers...\n nameEm = C.Brown, # ...\n valEm = C.Brown, # () ...\n\n # Colors for printing the exception\n excName = C.Red, # NameError\n line = C.LightRed, # The errored code line\n caret = C.White, # ^ \n Normal = C.Brown, # File, exec(code_obj, ..., invalid syntax, ...\n ))\n return ex_colors\n\nexcolors.exception_colors = exception_colors\nultratb.exception_colors = exception_colors\ndebugger.exception_colors = exception_colors\n\n\n# ------------------------------------------------------------------------------\n# No newlines after input and output:\n# \nc.TerminalInteractiveShell.separate_in = ''\nc.TerminalInteractiveShell.separate_out = ''\nc.TerminalInteractiveShell.separate_out2 = ''\n\n\n## =============================================================================\n# ORIGINAL: \n\n# Configuration file for ipython.\n\n#------------------------------------------------------------------------------\n# InteractiveShellApp(Configurable) configuration\n#------------------------------------------------------------------------------\n\n## A Mixin for applications that start InteractiveShell instances.\n# \n# Provides configurables for loading extensions and executing files as part of\n# configuring a Shell environment.\n# \n# The following methods should be called by the :meth:`initialize` method of the\n# subclass:\n# \n# - :meth:`init_path`\n# - :meth:`init_shell` (to be implemented by the subclass)\n# - :meth:`init_gui_pylab`\n# - :meth:`init_extensions`\n# - :meth:`init_code`\n\n## Execute the given command string.\n#c.InteractiveShellApp.code_to_run = ''\n\n## Run the file referenced by the PYTHONSTARTUP environment variable at IPython startup.\n#c.InteractiveShellApp.exec_PYTHONSTARTUP = True\n\n## List of files to run at IPython startup.\n#c.InteractiveShellApp.exec_files = []\n\n## lines of code to run at IPython startup.\n#c.InteractiveShellApp.exec_lines = []\n\n## A list of dotted module names of IPython extensions to load.\n#c.InteractiveShellApp.extensions = []\n\n## dotted module name of an IPython extension to load.\n#c.InteractiveShellApp.extra_extension = ''\n\n## A file to be run\n#c.InteractiveShellApp.file_to_run = ''\n\n## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',\n# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').\n#c.InteractiveShellApp.gui = None\n\n## Should variables loaded at startup (by startup files, exec_lines, etc.) be\n# hidden from tools like %who?\n#c.InteractiveShellApp.hide_initial_ns = True\n\n## Configure matplotlib for interactive use with the default matplotlib backend.\n#c.InteractiveShellApp.matplotlib = None\n\n## Run the module as a script.\n#c.InteractiveShellApp.module_to_run = ''\n\n## Pre-load matplotlib and numpy for interactive use, selecting a particular\n# matplotlib backend and loop integration.\n#c.InteractiveShellApp.pylab = None\n\n## If true, IPython will populate the user namespace with numpy, pylab, etc. 
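The ("Red", "0;31")-style pairs in color_templates above are raw ANSI SGR parameters; wrapped in the escape sequence \x1b[...m they drive the terminal colors directly, including the 256-color form used for Green108. A quick demonstration:

for name, code in [("Red", "0;31"), ("LightBlue", "1;34"), ("Green108", "38;5;108")]:
    # \x1b[<code>m switches the color on, \x1b[0m resets it
    print("\x1b[{0}m{1}\x1b[0m".format(code, name))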
and\n# an ``import *`` is done from numpy and pylab, when using pylab mode.\n# \n# When False, pylab mode should not import any names into the user namespace.\n#c.InteractiveShellApp.pylab_import_all = True\n\n## Reraise exceptions encountered loading IPython extensions?\n#c.InteractiveShellApp.reraise_ipython_extension_failures = False\n\n#------------------------------------------------------------------------------\n# Application(SingletonConfigurable) configuration\n#------------------------------------------------------------------------------\n\n## This is an application.\n\n## The date format used by logging formatters for %(asctime)s\n#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'\n\n## The Logging format template\n#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'\n\n## Set the log level by value or name.\n#c.Application.log_level = 30\n\n#------------------------------------------------------------------------------\n# BaseIPythonApplication(Application) configuration\n#------------------------------------------------------------------------------\n\n## IPython: an enhanced interactive Python shell.\n\n## Whether to create profile dir if it doesn't exist\n#c.BaseIPythonApplication.auto_create = False\n\n## Whether to install the default config files into the profile dir. If a new\n# profile is being created, and IPython contains config files for that profile,\n# then they will be staged into the new directory. Otherwise, default config\n# files will be automatically generated.\n#c.BaseIPythonApplication.copy_config_files = False\n\n## Path to an extra config file to load.\n# \n# If specified, load this config file in addition to any other IPython config.\n#c.BaseIPythonApplication.extra_config_file = ''\n\n## The name of the IPython directory. This directory is used for logging\n# configuration (through profiles), history storage, etc. The default is usually\n# $HOME/.ipython. This option can also be specified through the environment\n# variable IPYTHONDIR.\n#c.BaseIPythonApplication.ipython_dir = ''\n\n## Whether to overwrite existing config files when copying\n#c.BaseIPythonApplication.overwrite = False\n\n## The IPython profile to use.\n#c.BaseIPythonApplication.profile = 'default'\n\n## Create a massive crash report when IPython encounters what may be an internal\n# error. The default is to append a short message to the usual traceback\n#c.BaseIPythonApplication.verbose_crash = False\n\n#------------------------------------------------------------------------------\n# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration\n#------------------------------------------------------------------------------\n\n## Whether to display a banner upon starting IPython.\n#c.TerminalIPythonApp.display_banner = True\n\n## If a command or file is given via the command-line, e.g. 'ipython foo.py',\n# start an interactive shell after executing the file or command.\n#c.TerminalIPythonApp.force_interact = False\n\n## Class to use to instantiate the TerminalInteractiveShell object. 
Useful for\n# custom Frontends\n#c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'\n\n## Start IPython quickly by skipping the loading of config files.\n#c.TerminalIPythonApp.quick = False\n\n#------------------------------------------------------------------------------\n# InteractiveShell(SingletonConfigurable) configuration\n#------------------------------------------------------------------------------\n\n## An enhanced, interactive shell for Python.\n\n## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run\n# interactively (displaying output from expressions).\n#c.InteractiveShell.ast_node_interactivity = 'last_expr'\n\n## A list of ast.NodeTransformer subclass instances, which will be applied to\n# user input before code is run.\n#c.InteractiveShell.ast_transformers = []\n\n## Make IPython automatically call any callable object even if you didn't type\n# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.\n# The value can be '0' to disable the feature, '1' for 'smart' autocall, where\n# it is not applied if there are no more arguments on the line, and '2' for\n# 'full' autocall, where all callable objects are automatically called (even if\n# no arguments are present).\n#c.InteractiveShell.autocall = 0\n\n## Autoindent IPython code entered interactively.\n#c.InteractiveShell.autoindent = True\n\n## Enable magic commands to be called without the leading %.\n#c.InteractiveShell.automagic = True\n\n## The part of the banner to be printed before the profile\n#c.InteractiveShell.banner1 = \"Python 3.5.2 (default, Oct 8 2019, 13:06:37) \\nType 'copyright', 'credits' or 'license' for more information\\nIPython 6.1.0 -- An enhanced Interactive Python. Type '?' for help.\\n\"\n\n## The part of the banner to be printed after the profile\n#c.InteractiveShell.banner2 = ''\n\n## Set the size of the output cache. The default is 1000, you can change it\n# permanently in your config file. Setting it to 0 completely disables the\n# caching system, and the minimum value accepted is 3 (if you provide a value\n# less than 3, it is reset to 0 and a warning is issued). This limit is defined\n# because otherwise you'll spend more time re-flushing a too small cache than\n# working\n#c.InteractiveShell.cache_size = 1000\n\n## Use colors for displaying information about objects. Because this information\n# is passed through a pager (like 'less'), and some pagers get confused with\n# color codes, this capability can be turned off.\n#c.InteractiveShell.color_info = True\n\n## Set the color scheme (NoColor, Neutral, Linux, or LightBG).\n#c.InteractiveShell.colors = 'Neutral'\n\n## \n#c.InteractiveShell.debug = False\n\n## Don't call post-execute functions that have failed in the past.\n#c.InteractiveShell.disable_failing_post_execute = False\n\n## If True, anything that would be passed to the pager will be displayed as\n# regular output instead.\n#c.InteractiveShell.display_page = False\n\n## (Provisional API) enables html representation in mime bundles sent to pagers.\n#c.InteractiveShell.enable_html_pager = False\n\n## Total length of command history\n#c.InteractiveShell.history_length = 10000\n\n## The number of saved history entries to be loaded into the history buffer at\n# startup.\n#c.InteractiveShell.history_load_length = 1000\n\n## \n#c.InteractiveShell.ipython_dir = ''\n\n## Start logging to the given file in append mode. 
Use `logfile` to specify a log\n# file to **overwrite** logs to.\n#c.InteractiveShell.logappend = ''\n\n## The name of the logfile to use.\n#c.InteractiveShell.logfile = ''\n\n## Start logging to the default log file in overwrite mode. Use `logappend` to\n# specify a log file to **append** logs to.\n#c.InteractiveShell.logstart = False\n\n## \n#c.InteractiveShell.object_info_string_level = 0\n\n## Automatically call the pdb debugger after every exception.\n#c.InteractiveShell.pdb = False\n\n## Deprecated since IPython 4.0 and ignored since 5.0, set\n# TerminalInteractiveShell.prompts object directly.\n#c.InteractiveShell.prompt_in1 = 'In [\\\\#]: '\n\n## Deprecated since IPython 4.0 and ignored since 5.0, set\n# TerminalInteractiveShell.prompts object directly.\n#c.InteractiveShell.prompt_in2 = ' .\\\\D.: '\n\n## Deprecated since IPython 4.0 and ignored since 5.0, set\n# TerminalInteractiveShell.prompts object directly.\n#c.InteractiveShell.prompt_out = 'Out[\\\\#]: '\n\n## Deprecated since IPython 4.0 and ignored since 5.0, set\n# TerminalInteractiveShell.prompts object directly.\n#c.InteractiveShell.prompts_pad_left = True\n\n## \n#c.InteractiveShell.quiet = False\n\n## \n#c.InteractiveShell.separate_in = '\\n'\n\n## \n#c.InteractiveShell.separate_out = ''\n\n## \n#c.InteractiveShell.separate_out2 = ''\n\n## Show rewritten input, e.g. for autocall.\n#c.InteractiveShell.show_rewritten_input = True\n\n## Enables rich html representation of docstrings. (This requires the docrepr\n# module).\n#c.InteractiveShell.sphinxify_docstring = False\n\n## \n#c.InteractiveShell.wildcards_case_sensitive = True\n\n## Switch modes for the IPython exception handlers.\n#c.InteractiveShell.xmode = 'Context'\n\n#------------------------------------------------------------------------------\n# TerminalInteractiveShell(InteractiveShell) configuration\n#------------------------------------------------------------------------------\n\n## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,\n# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a\n# direct exit without any confirmation.\n#c.TerminalInteractiveShell.confirm_exit = True\n\n## Options for displaying tab completions, 'column', 'multicolumn', and\n# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`\n# documentation for more information.\n#c.TerminalInteractiveShell.display_completions = 'multicolumn'\n\n## Shortcut style to use at the prompt. 'vi' or 'emacs'.\n#c.TerminalInteractiveShell.editing_mode = 'emacs'\n\n## Set the editor used by IPython (default to $EDITOR/vi/notepad).\n#c.TerminalInteractiveShell.editor = 'vi'\n\n## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is\n# in addition to the F2 binding, which is always enabled.\n#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False\n\n## Provide an alternative handler to be called when the user presses Return. 
This\n# is an advanced option intended for debugging, which may be changed or removed\n# in later releases.\n#c.TerminalInteractiveShell.handle_return = None\n\n## Highlight matching brackets.\n#c.TerminalInteractiveShell.highlight_matching_brackets = True\n\n## The name or class of a Pygments style to use for syntax\n# highlighting: \n# fruity, autumn, igor, abap, algol_nu, solarized-light, emacs, xcode, pastie, rainbow_dash, monokai, vs, stata-dark, trac, murphy, tango, solarized-dark, borland, stata, paraiso-dark, perldoc, default, sas, rrt, paraiso-light, vim, arduino, friendly, manni, stata-light, colorful, bw, algol, native, lovelace, inkpot\n#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined\n\n## Override highlighting format for specific tokens\n#c.TerminalInteractiveShell.highlighting_style_overrides = {}\n\n## Enable mouse support in the prompt\n#c.TerminalInteractiveShell.mouse_support = False\n\n## Class used to generate Prompt token for prompt_toolkit\n#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'\n\n## Use `raw_input` for the REPL, without completion and prompt colors.\n# \n# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.\n# Known usage are: IPython own testing machinery, and emacs inferior-shell\n# integration through elpy.\n# \n# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment\n# variable is set, or the current terminal is not a tty.\n#c.TerminalInteractiveShell.simple_prompt = False\n\n## Number of line at the bottom of the screen to reserve for the completion menu\n#c.TerminalInteractiveShell.space_for_menu = 6\n\n## Automatically set the terminal title\n#c.TerminalInteractiveShell.term_title = True\n\n## Customize the terminal title format. This is a python format string.\n# Available substitutions are: {cwd}.\n#c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'\n\n## Use 24bit colors instead of 256 colors in prompt highlighting. If your\n# terminal supports true color, the following command should print 'TRUECOLOR'\n# in orange: printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"\n#c.TerminalInteractiveShell.true_color = False\n\n#------------------------------------------------------------------------------\n# HistoryAccessor(HistoryAccessorBase) configuration\n#------------------------------------------------------------------------------\n\n## Access the history database without adding to it.\n# \n# This is intended for use by standalone history tools. IPython shells use\n# HistoryManager, below, which is a subclass of this.\n\n## Options for configuring the SQLite connection\n# \n# These options are passed as keyword args to sqlite3.connect when establishing\n# database conenctions.\n#c.HistoryAccessor.connection_options = {}\n\n## enable the SQLite history\n# \n# set enabled=False to disable the SQLite history, in which case there will be\n# no stored history, no SQLite connection, and no background saving thread.\n# This may be necessary in some threaded environments where IPython is embedded.\n#c.HistoryAccessor.enabled = True\n\n## Path to file to use for SQLite history database.\n# \n# By default, IPython will put the history database in the IPython profile\n# directory. 
If you would rather share one history among profiles, you can set\n# this value in each, so that they are consistent.\n# \n# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.\n# If you see IPython hanging, try setting this to something on a local disk,\n# e.g::\n# \n# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite\n# \n# you can also use the specific value `:memory:` (including the colon at both\n# end but not the back ticks), to avoid creating an history file.\n#c.HistoryAccessor.hist_file = ''\n\n#------------------------------------------------------------------------------\n# HistoryManager(HistoryAccessor) configuration\n#------------------------------------------------------------------------------\n\n## A class to organize all history-related functionality in one place.\n\n## Write to database every x commands (higher values save disk access & power).\n# Values of 1 or less effectively disable caching.\n#c.HistoryManager.db_cache_size = 0\n\n## Should the history database include output? (default: no)\n#c.HistoryManager.db_log_output = False\n\n#------------------------------------------------------------------------------\n# ProfileDir(LoggingConfigurable) configuration\n#------------------------------------------------------------------------------\n\n## An object to manage the profile directory and its resources.\n# \n# The profile directory is used by all IPython applications, to manage\n# configuration, logging and security.\n# \n# This object knows how to find, create and manage these directories. This\n# should be used by any code that wants to handle profiles.\n\n## Set the profile location directly. This overrides the logic used by the\n# `profile` option.\n#c.ProfileDir.location = ''\n\n#------------------------------------------------------------------------------\n# BaseFormatter(Configurable) configuration\n#------------------------------------------------------------------------------\n\n## A base formatter class that is configurable.\n# \n# This formatter should usually be used as the base class of all formatters. It\n# is a traited :class:`Configurable` class and includes an extensible API for\n# users to determine how their objects are formatted. The following logic is\n# used to find a function to format an given object.\n# \n# 1. The object is introspected to see if it has a method with the name\n# :attr:`print_method`. If is does, that object is passed to that method\n# for formatting.\n# 2. If no print method is found, three internal dictionaries are consulted\n# to find print method: :attr:`singleton_printers`, :attr:`type_printers`\n# and :attr:`deferred_printers`.\n# \n# Users should use these dictionaries to register functions that will be used to\n# compute the format data for their objects (if those objects don't have the\n# special print methods). 
The easiest way of using these dictionaries is through\n# the :meth:`for_type` and :meth:`for_type_by_name` methods.\n# \n# If no function/callable is found to compute the format data, ``None`` is\n# returned and this format type is not used.\n\n## \n#c.BaseFormatter.deferred_printers = {}\n\n## \n#c.BaseFormatter.enabled = True\n\n## \n#c.BaseFormatter.singleton_printers = {}\n\n## \n#c.BaseFormatter.type_printers = {}\n\n#------------------------------------------------------------------------------\n# PlainTextFormatter(BaseFormatter) configuration\n#------------------------------------------------------------------------------\n\n## The default pretty-printer.\n# \n# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.\n# If the object cannot be pretty printed, :func:`repr` is used. See the\n# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty\n# printers. Here is a simple example::\n# \n# def dtype_pprinter(obj, p, cycle):\n# if cycle:\n# return p.text('dtype(...)')\n# if hasattr(obj, 'fields'):\n# if obj.fields is None:\n# p.text(repr(obj))\n# else:\n# p.begin_group(7, 'dtype([')\n# for i, field in enumerate(obj.descr):\n# if i > 0:\n# p.text(',')\n# p.breakable()\n# p.pretty(field)\n# p.end_group(7, '])')\n\n## \n#c.PlainTextFormatter.float_precision = ''\n\n## Truncate large collections (lists, dicts, tuples, sets) to this size.\n# \n# Set to 0 to disable truncation.\n#c.PlainTextFormatter.max_seq_length = 1000\n\n## \n#c.PlainTextFormatter.max_width = 79\n\n## \n#c.PlainTextFormatter.newline = '\\n'\n\n## \n#c.PlainTextFormatter.pprint = True\n\n## \n#c.PlainTextFormatter.verbose = False\n\n#------------------------------------------------------------------------------\n# Completer(Configurable) configuration\n#------------------------------------------------------------------------------\n\n## Enable unicode completions, e.g. \\alpha . Includes completion of latex\n# commands, unicode names, and expanding unicode characters back to latex\n# commands.\n#c.Completer.backslash_combining_completions = True\n\n## Enable debug for the Completer. Mostly print extra information for\n# experimental jedi integration.\n#c.Completer.debug = False\n\n## Activate greedy completion PENDING DEPRECTION. this is now mostly taken care\n# of with Jedi.\n# \n# This will enable completion on elements of lists, results of function calls,\n# etc., but can be unsafe because the code is actually evaluated on TAB.\n#c.Completer.greedy = False\n\n## Experimental: restrict time (in milliseconds) during which Jedi can compute\n# types. Set to 0 to stop computing types. Non-zero value lower than 100ms may\n# hurt performance by preventing jedi to build its cache.\n#c.Completer.jedi_compute_type_timeout = 400\n\n## Experimental: Use Jedi to generate autocompletions. 
Default to True if jedi is\n# installed\n#c.Completer.use_jedi = True\n\n#------------------------------------------------------------------------------\n# IPCompleter(Completer) configuration\n#------------------------------------------------------------------------------\n\n## Extension of the completer class with IPython-specific features\n\n## DEPRECATED as of version 5.0.\n# \n# Instruct the completer to use __all__ for the completion\n# \n# Specifically, when completing on ``object.``.\n# \n# When True: only those names in obj.__all__ will be included.\n# \n# When False [default]: the __all__ attribute is ignored\n#c.IPCompleter.limit_to__all__ = False\n\n## Whether to merge completion results into a single list\n# \n# If False, only the completion results from the first non-empty completer will\n# be returned.\n#c.IPCompleter.merge_completions = True\n\n## Instruct the completer to omit private method names\n# \n# Specifically, when completing on ``object.``.\n# \n# When 2 [default]: all names that start with '_' will be excluded.\n# \n# When 1: all 'magic' names (``__foo__``) will be excluded.\n# \n# When 0: nothing will be excluded.\n#c.IPCompleter.omit__names = 2\n\n#------------------------------------------------------------------------------\n# ScriptMagics(Magics) configuration\n#------------------------------------------------------------------------------\n\n## Magics for talking to scripts\n# \n# This defines a base `%%script` cell magic for running a cell with a program in\n# a subprocess, and registers a few top-level magics that call %%script with\n# common interpreters.\n\n## Extra script cell magics to define\n# \n# This generates simple wrappers of `%%script foo` as `%%foo`.\n# \n# If you want to add script magics that aren't on your path, specify them in\n# script_paths\n#c.ScriptMagics.script_magics = []\n\n## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'\n# \n# Only necessary for items in script_magics where the default path will not find\n# the right interpreter.\n#c.ScriptMagics.script_paths = {}\n\n#------------------------------------------------------------------------------\n# LoggingMagics(Magics) configuration\n#------------------------------------------------------------------------------\n\n## Magics related to all logging machinery.\n\n## Suppress output of log state when logging is enabled\n#c.LoggingMagics.quiet = False\n\n#------------------------------------------------------------------------------\n# StoreMagics(Magics) configuration\n#------------------------------------------------------------------------------\n\n## Lightweight persistence for python variables.\n# \n# Provides the %store magic.\n\n## If True, any %store-d variables will be automatically restored when IPython\n# starts.\n#c.StoreMagics.autorestore = False\n\n\n","sub_path":"ipon/data/profile/ipython_config.py","file_name":"ipython_config.py","file_ext":"py","file_size_in_byte":31886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41542107","text":"import googleapiclient.discovery\nimport os.path\nimport hashlib\nimport tempfile\n\nfrom .apps import DCTConfig\n\n\nclass DiscoveryCache:\n \"\"\"\n Unix file-based cache for use with the API Discovery service\n\n See https://github.com/googleapis/google-api-python-client/issues/325#issuecomment-419387788\n \"\"\"\n def filename(self, url):\n return os.path.join(\n tempfile.gettempdir(),\n 'google_api_discovery_' + 
hashlib.md5(url.encode()).hexdigest())\n\n def get(self, url):\n try:\n with open(self.filename(url), 'rb') as f:\n return f.read().decode()\n except FileNotFoundError:\n return None\n\n def set(self, url, content):\n with tempfile.NamedTemporaryFile(delete=False) as f:\n f.write(content.encode())\n f.flush()\n os.fsync(f)\n os.rename(f.name, self.filename(url))\n\n\nclass cached_property(object):\n def __init__(self, fget):\n self.fget = fget\n self.func_name = fget.__name__\n\n def __get__(self, obj, cls):\n if obj is None:\n return None\n value = self.fget(obj)\n setattr(obj, self.func_name, value)\n return value\n\n\nclass GoogleCloudClient(object):\n @cached_property\n def client(self):\n client = googleapiclient.discovery.build('cloudtasks', 'v2beta3',\n credentials=DCTConfig.google_cloud_credentials(), \n cache=DiscoveryCache())\n return client\n\n @cached_property\n def tasks_endpoint(self):\n client = self.client\n tasks_endpoint = client.projects().locations().queues().tasks()\n return tasks_endpoint\n\n\nconnection = GoogleCloudClient()\n","sub_path":"django_cloud_tasks/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465588030","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myblog', '0002_users'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Blog',\n fields=[\n ('blog_id', models.AutoField(serialize=False, primary_key=True)),\n ('title', models.CharField(max_length=128, verbose_name=b'Titile')),\n ('content', models.TextField(verbose_name=b'Content')),\n ('pub_data', models.DateTimeField(auto_now_add=True)),\n ('edit_date', models.DateTimeField(auto_now=True)),\n ('user_id', models.ForeignKey(to='myblog.Users')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Cat_Blog_Map',\n fields=[\n ('map_id', models.AutoField(serialize=False, primary_key=True)),\n ('blog_id', models.ForeignKey(to='myblog.Blog')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('cat_id', models.AutoField(serialize=False, primary_key=True)),\n ('name', models.CharField(max_length=64, verbose_name=b'Category Name')),\n ('parent_id', models.ForeignKey(to='myblog.Category', blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('comment_id', models.AutoField(serialize=False, primary_key=True)),\n ('comment', models.TextField(verbose_name=b'Comment')),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ('post_id', models.ForeignKey(to='myblog.Blog')),\n ('user_id', models.ForeignKey(to='myblog.Users')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Images',\n fields=[\n ('img_id', models.AutoField(serialize=False, primary_key=True)),\n ('img_name', models.CharField(max_length=64, verbose_name=b'Image Name')),\n ('img_url', models.CharField(max_length=256, verbose_name=b'Image URL')),\n ('blog_id', models.ForeignKey(to='myblog.Blog')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='post',\n name='author',\n ),\n migrations.DeleteModel(\n name='Post',\n ),\n migrations.AddField(\n model_name='cat_blog_map',\n name='cat_id',\n 
field=models.ForeignKey(to='myblog.Category'),\n preserve_default=True,\n ),\n ]\n","sub_path":"myblog/migrations/0003_auto_20150212_1240.py","file_name":"0003_auto_20150212_1240.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"609976695","text":"from data_splitter import partnerDataSplitter\nfrom partner_data_reader import PartnerDataReader\nfrom optimizer import Optimizer\nimport json\nfrom datetime import datetime\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\n\nclass SimulationCore():\n def __init__(self):\n self.optimizer = None\n\n def split_dataset_into_partners(self):\n partnerDataSplitter.split_dataSet_into_partners()\n\n def run_simulation_for_particular_partner(self, partner_id: str):\n partner = PartnerDataReader(\n partner_id)\n\n clickCost = partner.average_click_cost\n\n self.optimizer = Optimizer(\n partner.grouped_by_day, clickCost)\n\n for index in range(len(self.optimizer.data_from_partner) - 1):\n self.optimizer.optimize_day(\n self.optimizer.data_from_partner[index])\n\n self.optimizer.log_optimized_days(partner_id)\n\n def generate_plot_profit_gain(self):\n plt.plot(self.optimizer.profit_gain_list)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('Profit gain')\n plt.legend(['C0F515F0A2D0A5D9F854008BA76EB537'])\n plt.show()\n\n def generate_plot_sustainded_profit(self):\n plt.plot(self.optimizer.sustained_profit_list)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('Sustained profit')\n plt.legend(['C0F515F0A2D0A5D9F854008BA76EB537'])\n plt.show()\n\n def generate_plot_accumulated_profit_gain(self):\n plt.plot(self.optimizer.accumulated_profit_gain)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('Accumulated profit gain')\n plt.legend(['C0F515F0A2D0A5D9F854008BA76EB537'])\n plt.show()\n\n def generate_plot_accumulated_sustained_gain(self):\n plt.plot(self.optimizer.accumulated_sustained_profit)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('Accumulated sustained gain')\n plt.legend(['C0F515F0A2D0A5D9F854008BA76EB537'])\n plt.show()\n\n def generate_plot_profit_gain_and_sustained_profit(self):\n plt.plot(self.optimizer.profit_gain_list)\n plt.plot(self.optimizer.sustained_profit_list)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('EUR')\n plt.legend(['Profit gain', 'Sustained gain'])\n plt.show()\n\n def generate_plot_accumulated_profit_gain_and_sustained_profit(self):\n plt.plot(self.optimizer.accumulated_profit_gain)\n plt.plot(self.optimizer.accumulated_sustained_profit)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('EUR')\n plt.legend(['Accumulated profit gain', 'Accumulated sustained gain'])\n plt.show()\n\n def generate_plot_accumulated_profit_gain_ratio(self):\n plt.plot(self.optimizer.profit_ratio_list)\n plt.grid()\n plt.xlabel('Days of simulation')\n plt.ylabel('Accumulated profit gain ratio')\n plt.legend(['C0F515F0A2D0A5D9F854008BA76EB537'])\n plt.show()\n\n def check_logs_with_validation_data(self, partner_id: str):\n validationPath = f'resources/validation_data/partner_riegiel_id_{partner_id}.json'\n createdLogsPath = f'logs_partner_id_{partner_id}.json'\n\n with open(validationPath) as validationFile:\n validationData = json.load(validationFile)\n with open(createdLogsPath) as createdLog:\n createdData = json.load(createdLog)\n\n return (validationData == createdData)\n\n def create_csv_with_results(self):\n dataframe_dictionary = {'profit_gain_list': 
self.optimizer.profit_gain_list,\n 'sustained_profit_list': self.optimizer.sustained_profit_list,\n 'accumulated_profit_gain': self.optimizer.accumulated_profit_gain,\n 'accumulated_sustained_profit': self.optimizer.accumulated_sustained_profit,\n 'profit_ratio_list': self.optimizer.profit_ratio_list\n }\n dataframe = pd.DataFrame(dataframe_dictionary)\n\n dataframe.to_csv('results_dataframe_JE.csv')\n","sub_path":"src/simulation_core.py","file_name":"simulation_core.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"33770737","text":"class ReactExtractor :\n def __init__(self , web_element):\n self.web_element = web_element\n @property\n def extract_react (self):\n t=dict()\n t['J’aime ']=0\n t['J’adore ']=0\n t['Haha']=0\n t['Grrr']=0\n reaction_div = self.web_element.find_elements_by_css_selector('span._1n9k')\n for reactions in reaction_div :\n reaction = reactions.find_element_by_css_selector('a').get_attribute(\"aria-label\")\n if \"J’aime\" in reaction :\n t['J’aime '] = int(str( reaction.strip(\"J’aime\").replace(\" K\", \"000\").replace(\",\",\"\")))\n elif \"J’adore\" in reaction :\n t['J’adore ' ]= int(str(reaction.strip(\"J’adore\")))\n elif \"Haha\" in reaction :\n t['Haha'] = int(str(reaction.strip(\"Haha\")))\n elif \"Grrr\" in reaction :\n t['Grrr' ]= int(str(reaction.strip(\"Grrr\")))\n return [t['J’aime '],t['J’adore ' ], t['Haha'], t['Grrr' ]]\n","sub_path":"src/driver_actions/extract_reaction.py","file_name":"extract_reaction.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240986469","text":"from datetime import datetime\nimport warnings\n\n# coding:utf-8\nimport requests\n\ndef get_data_by_post(cond_dic):\n r = requests.post(\"http://119.27.160.141:4242/api/query\", json=cond_dic)\n if len(r.json()) > 0:\n dps = r.json()[0]['dps']\n return dps\n else:\n return None\n\ndef build_post_json():\n cond_dic = {\n \"start\": 1490586530,\n # \"end\": 1489836195,\n \"queries\": [\n {\n \"aggregator\": \"sum\",\n \"metric\": \"sys.cpu.data\",\n # \"tags\": {\"host\": \"web01\"}\n },\n ]\n }\n return cond_dic\n\nif __name__ == \"__main__\":\n print(get_data_by_post(build_post_json()))","sub_path":"examples/open_tsdb/post_req.py","file_name":"post_req.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"121481116","text":"import pytest\nfrom flask import session\nfrom tests.pytest_fixtures import test_client, persistence\n\ndef test_admin_can_access_upload_page(test_client):\n\twith test_client.session_transaction() as session:\n\t\tsession['is_admin'] = True\n\t\n\tresponse = test_client.get('/upload-playtest')\n\n\tassert session['is_admin'] == True\n\tassert response.status_code == 200\n\ndef test_non_admin_cannot_access_upload_page(test_client):\n\tresponse = test_client.get('/upload-playtest')\n\n\tassert response.status_code == 401\n\ndef test_upload_page_has_correct_title(test_client):\n\twith test_client.session_transaction() as session:\n\t\tsession['is_admin'] = True\n\n\tresponse = test_client.get('/upload-playtest')\n\n\tassert \"Upload Playtest\" in 
response.data\n\n","sub_path":"tests/unittests/test_upload_playtest.py","file_name":"test_upload_playtest.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"90869780","text":"import argparse\nimport json\nimport os\n\ndef Main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"filepath\", help = \"path to folder downloaded from facebook\")\n args = parser.parse_args()\n\n myFB = Facebook(args.filepath)\n choose = input('Please choose a visualization:\\n1. What are my top 10 most used words? \\n2. Who have I pokde most? \\n3.Who are the friends I interact with the most? \\n4. What facebook knows about my off-facebook activity? \\n5. What facebook knows about my installed aoos?\\nYour choice: ')\n\n if int(choose):\n user_operation(myFB, int(choose))\n else:\n print('invalid input: enter a number')\n\ndef user_operation(fb, choose):\n \"\"\"Show something based on users' choice.\n\n Args:\n A Facebook instance, the number user entered.\n\n Returns:\n No return.\n \"\"\"\n\n if choose == 1:\n all_Comments = fb.comments()\n\n elif choose == 2:\n print('poked most')\n\n elif choose == 3:\n print('friends')\n\n elif choose == 4:\n offFB_activites = fb.offFB_activities()\n for activity in offFB_activites:\n if activity['name']:\n print(activity['name'])\n\n elif choose == 5:\n print('installed apps')\n\n else:\n print('???')\n\ndef comments_str(comments):\n \"\"\"Pick the content from comments dictionary and combine all of them into one string.\n\n Args:\n An array of comments. All the info are in dictioanry format.\n\n Returns:\n A string.\n \"\"\"\n\n comments_str = \"\"\n for aComments in comments:\n if \"data\" in aComments: # Ensure comment has data attached\n datas = aComments[\"data\"]\n for data in datas:\n str = data[\"comment\"][\"comment\"]\n comments_str = comments_str + \" \" + str\n return comments_str\n\nclass Facebook:\n \"\"\"A Facebook instance provides data including comments, apps.\n\n Args:\n The path of the folder user downloaded from Facebook.\n\n Returns:\n All the returns are arrays of dictionaries.\n \"\"\"\n\n def __init__(self, folder):\n self.folder = folder\n\n def profile(self):\n path = str(self.folder + \"/profile_information/profile_information.json\")\n try:\n with open(path) as f:\n d = json.load(f)\n return d[\"profile\"]\n except:\n print(\"read profile info json fail\")\n return []\n\n def comments(self):\n path = str(self.folder + \"/comments/comments.json\")\n try:\n with open(path) as f:\n d = json.load(f)\n return d[\"comments\"]\n except:\n print(\"read comments' json fail\")\n return []\n\n def messages(self):\n path = str(self.folder + \"/messages/inbox/\")\n messages = []\n try:\n folders = os.scandir(path)\n for f in folders:\n if os.path.isdir(os.path.join(path, f)):\n folder = os.scandir(os.path.join(path, f))\n for file in folder:\n if os.path.isfile(os.path.join(path, file)):\n messages.append(json.load(open(os.path.join(path, file))))\n except:\n print(\"read messages json fail\")\n return messages\n\n def posts(self):\n path = str(self.folder + \"/posts/\")\n posts = []\n try:\n folders = os.scandir(path)\n for f in folders:\n if os.path.isfile(os.path.join(path, f)) and ('your_posts' in f.name):\n posts.append(json.load(open(os.path.join(path, f))))\n except:\n print(\"read posts json fail\")\n return posts\n\n def apps(self):\n path = str(self.folder + \"/apps_and_websites/apps_and_websites.json\")\n try:\n with open(path) as f:\n d 
= json.load(f)\n return d[\"installed_apps\"]\n except:\n #print(\"read apps' json fail\")\n return[]\n\n def offFB_activities(self):\n path = str(self.folder + \"/ads_and_businesses/your_off-facebook_activity.json\")\n try:\n with open(path) as f:\n d = json.load(f)\n return d[\"off_facebook_activity\"]\n except:\n print(\"read offFB' json fail\")\n return []\n\n def offFB_activities_list(self):\n activityList = []\n activity_dict = self.offFB_activities()\n for act in activity_dict:\n activityList.append(act['name'])\n\n return activityList\n\n def account_activity(self):\n path = str(self.folder + \"/security_and_login_information/account_activity.json\")\n try:\n with open(path) as f:\n d = json.load(f)\n return d[\"account_activity\"]\n except:\n print(\"read account_activity' json fail\")\n return []\n\n\nif __name__ == \"__main__\":\n Main()\n","sub_path":"Plots/readFolder.py","file_name":"readFolder.py","file_ext":"py","file_size_in_byte":4841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111477796","text":"import sys\nimport json\n\n\nclass RedditReader(object):\n def __init__(self):\n self.threads = 0\n self.comments = 0\n self.authors = set()\n self.max_comments = 0\n\n def process_comments(self, comments):\n if comments:\n for comment in comments:\n if comment:\n self.comments += 1\n if 'author' in comment:\n self.authors.add(comment['author'])\n if 'comments' in comment:\n self.process_comments(comment['comments'])\n\n def process_thread(self, thread):\n self.threads += 1\n if 'author' in thread:\n self.authors.add(thread['author'])\n\n comments_before = self.comments\n if 'comments' in thread:\n self.process_comments(thread['comments'])\n comments = self.comments - comments_before\n if comments > self.max_comments:\n self.max_comments = comments\n\n def read_file(self, filename):\n with open(filename, 'r') as f:\n for line in f:\n thread = json.loads(line)\n self.process_thread(thread)\n\n print('threads: %s' % self.threads)\n print('comments: %s' % self.comments)\n print('mean comments per thread: %.2f' % (float(self.comments) / float(self.threads)))\n print('largest thread: %s comments' % self.max_comments)\n print('authors: %s' % len(self.authors))\n\n\nif __name__ == '__main__':\n RedditReader().read_file(sys.argv[1])\n","sub_path":"scripts/reddit-info.py","file_name":"reddit-info.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"612638534","text":"# -*- coding: utf-8 -*-\n# To use this script, make sure you have pandas, xlrd and xlsxwriter installed.\nfrom __future__ import print_function\nimport os\nimport pandas as pd\nimport xlsxwriter\n\n# Create a model: price dictionary from the local table\npriced_list = {}\nprice_table = pd.read_excel(r'priced_list.xlsx')\nfor i in price_table.index:\n model = price_table.ix[i, \"Model\"]\n conditional_price = price_table.ix[i, \"Price 1\":\"Price 7\"].values\n priced_list[model] = conditional_price.tolist()\n\n# Load the questionaire data\ndf = pd.read_excel(r'data.xlsx')\n\n# XlsxWriter can only create new files.\n# Delete the existed file before creating a new one.\nif os.path.exists('final_data.xlsx'):\n os.remove('final_data.xlsx')\n# Create a xlsx file to store the formatted data\nworkbook = xlsxwriter.Workbook('final_data.xlsx')\nworksheet = workbook.add_worksheet()\n\n# Get fixed values from the local file\nwith open(r'fixed_values.txt', 'r') as f:\n fixed_values = 
list(map(lambda x: int(x), f.read().split()))\n\nscreens = [str(_) for _ in range(1, 21)]\ncars = 5\nmodels = price_table['Model'].values.tolist()\n\n\ndef format_a_response_data(response, row):\n    # 1st row\n    respid = df.ix[response, \"respid\"]\n    worksheet.write(row, 0, respid)\n    worksheet.write(row, 1, fixed_values[0])\n    worksheet.write(row, 2, fixed_values[1])\n    worksheet.write(row, 3, len(screens))\n    worksheet.write(row, 4, fixed_values[2])\n    row += 1\n    # 2nd row\n    worksheet.write(row, 0, fixed_values[3])\n    row += 1\n    for screen in screens:\n        # 3rd row\n        worksheet.write(row, 0, cars)\n        worksheet.write(row, 1, fixed_values[4])\n        row += 1\n        # 4th - 8th rows\n        models_current_screen = df.ix[\n            response, \"screensModel_\" + screen].split(';')\n        prices_current_screen = df.ix[\n            response, \"screensPrice_\" + screen].split(';')\n        idx = 0\n        while idx < cars:\n            model_current = models_current_screen[idx]\n            model_current_index = models.index(model_current) + 1\n            price_current = int(prices_current_screen[idx])\n            price_current_index = priced_list[\n                model_current].index(price_current) + 1\n            worksheet.write(row, 0, model_current_index)\n            worksheet.write(row, 1, price_current_index)\n            row += 1\n            idx += 1\n        # 9th row\n        worksheet.write(row, 0, \"the_chosen_model\")\n        worksheet.write(row, 1, fixed_values[5])\n        row += 1\n    return row\n\nrow = 0\nfor response in df.index:\n    row = format_a_response_data(response, row)\n\nworkbook.close()\nprint(\"Data formatted successfully!\")\n","sub_path":"Conjoint/format_data.py","file_name":"format_data.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216184804","text":"import json\nfrom datetime import datetime, timedelta\nimport config\n#variation_id: \n# 3928 14 days\n# 3929 1 month\n# 3930 3 months\n# 3931 6 months\n# 3932 1 year\n# product_id: 3914\n\n\nclass Subscriptions():\n    def __init__(self, response):\n        self.response=response\n        self.subscriptions=[]\n        self.products=[3928, 3929, 3930, 3931, 3932]\n        self.product_names=['14 days', '1 month', '3 months', '6 months', '1 year']\n        self.role_names=config.role_ids\n\n    def sort_entries(self):\n        for subs in self.response:\n            subscription={}\n\n            subscription['discord_id']=subs['billing']['last_name']\n            subscription['status']=subs['status']\n            subscription['variation_id']=subs['line_items'][0]['variation_id']\n            index = [i for i,x in enumerate(self.products) if x == subscription['variation_id']]\n            subscription['variation_type']=self.product_names[index[0]]\n            subscription['role_id']=self.role_names[index[0]]\n\n            end_date = subs['end_date']\n            end_date=datetime.strptime(end_date, \"%Y-%m-%dT%H:%M:%S\")\n            today_date=datetime.utcnow()\n            days_left=end_date-today_date\n            subscription['days_before_expire']=days_left.days\n\n            # Check against subscriptions appended earlier\n            append=True\n            for i, sub in enumerate(self.subscriptions):\n\n                # If another subscription shares the same discord id\n                if sub['discord_id']==subs['billing']['last_name']:\n                    \n                    # if the latest subscription has been expired or cancelled it does nothing\n                    if sub['status']=='active' and (subs['status']=='expired' or subs['status']=='cancelled'):\n                        append=False\n                    # if the earlier subscription is expired or cancelled the newer subscription overrides it\n                    elif (sub['status']=='expired' or sub['status']=='cancelled') and subs['status']=='active':\n                        self.subscriptions.pop(i)\n\n                    # If both of the subscriptions are active\n                    elif sub['status']==\"active\" and subs['status']==\"active\":
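\n                        # NOTE: self.products runs from the shortest plan (14 days) to the longest (1 year),\n                        # so a larger index means a higher-tier plan. sub_index and subs_index below are\n                        # single-element lists, which compare element-wise, like the indices they hold.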
\n                        sub_index = [i for i,x in enumerate(self.products) if x == sub['variation_id']]\n                        subs_index = [i for i,x in enumerate(self.products) if x == subs['line_items'][0]['variation_id']]\n\n                        # Check which subscription type is the higher one and remove the lower\n                        if sub_index>subs_index:\n                            append=False\n                        elif subs_index>sub_index:\n                            self.subscriptions.pop(i)\n\n                        # If it's the same subscription type, keep the one with more days left before it expires\n                        else:\n                            if sub['days_before_expire']>subscription['days_before_expire']:\n                                append=False\n                            else:\n                                self.subscriptions.pop(i)\n\n            # Add the new subscription\n            if append:\n                self.subscriptions.append(subscription)\n        return self.subscriptions\n","sub_path":"utils/manage_subscription.py","file_name":"manage_subscription.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305861329","text":"\nhex_lst = input(\"Hex value to convert to little-endian -->\") # read the hex value to turn into little-endian\nchange_hex = \"\"\n\nif(len(hex_lst) % 2 != 0): # if the length is odd, the leading 0 was dropped, so prepend a 0 -> 0x08042411\n    hex_lst =\"0\" + hex_lst\n\nindex = len(hex_lst)\ncount = 0 # increases by 2 to drive the while loop\nrunning = True\nwhile(running):\n    change_hex += r\"\\x\" + hex_lst[index-2:index]\n    count += 2\n    index -= 2\n    if(count == len(hex_lst)):\n        running = False\n\nprint(\"Converted little-endian value : {0}\".format(change_hex))\n\n \n\n# Result\n# Hex value to convert to little-endian -->3333333\n# Converted little-endian value : \\x33\\x33\\x33\\x03\n","sub_path":"Hacking_Study/FTZ_Study/LittleEndian.py","file_name":"LittleEndian.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"462935019","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Discussion',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n                ('title', models.CharField(max_length=255)),\n                ('author', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Post',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n                ('body', models.TextField()),\n                ('pub_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),\n                ('author', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n                ('discussion', models.ForeignKey(to='discussions.Discussion')),\n                ('replays_to', models.ForeignKey(blank=True, to='discussions.Post')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Tag',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n                ('name', models.CharField(max_length=255)),\n            ],\n        ),\n        migrations.CreateModel(\n            name='Tagging',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n                ('post_id', models.ForeignKey(to='discussions.Post')),\n                ('tag_id', models.ForeignKey(to='discussions.Tag')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='UserDetails',\n            fields=[\n                ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n                ('about', models.TextField()),\n                ('joined', 
models.DateTimeField(blank=True, default=datetime.datetime.now)),\n ('user_id', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Vote',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),\n ('post_id', models.ForeignKey(to='discussions.Post')),\n ('voter_id', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='discussion',\n name='original_post',\n field=models.OneToOneField(to='discussions.Post', related_name='OP'),\n ),\n ]\n","sub_path":"server/studybuddy/discussions/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360968140","text":"\"\"\"\nA module for generating realistic HERA noise.\n\"\"\"\n\nimport os\n\nimport aipy\nimport numpy as np\nfrom scipy.interpolate import RectBivariateSpline\nimport aipy\nimport os\nfrom .data import DATA_PATH\nHERA_TSKY_VS_LST_NPZ = os.path.join(DATA_PATH, 'HERA_Tsky_vs_LST.npz')\n\nnpz = np.load(HERA_TSKY_VS_LST_NPZ) # Tsky vs fq/lst from Beardsley, beam v XXX, GSM v XXX\nfqs = npz['freqs'] / 1e3\nlsts = npz['lsts'] / 12. * np.pi\nlsts = np.concatenate([lsts[-10:]-2*np.pi, lsts, lsts[:10]+2*np.pi])\nHERA_Tsky_xx = npz['HERA_Tsky'][0].T\nHERA_Tsky_yy = npz['HERA_Tsky'][1].T\nHERA_Tsky_xx = np.concatenate([HERA_Tsky_xx[-10:], HERA_Tsky_xx, HERA_Tsky_xx[:10]])\nHERA_Tsky_yy = np.concatenate([HERA_Tsky_yy[-10:], HERA_Tsky_yy, HERA_Tsky_yy[:10]])\nHERA_Tsky_mdl = {}\nHERA_Tsky_mdl['xx'] = RectBivariateSpline(lsts, fqs, HERA_Tsky_xx, kx=4, ky=4)\nHERA_Tsky_mdl['yy'] = RectBivariateSpline(lsts, fqs, HERA_Tsky_yy, kx=4, ky=4)\n\n\nHERA_BEAM_POLY = np.array([8.07774113e+08, -1.02194430e+09,\n 5.59397878e+08, -1.72970713e+08, 3.30317669e+07, -3.98798031e+06,\n 2.97189690e+05, -1.24980700e+04, 2.27220000e+02]) # See HERA Memo #27\n\n\ndef jy2T(fqs, bm_poly=HERA_BEAM_POLY):\n \"\"\"\n Return [mK] / [Jy] for a beam size vs. frequency.\n\n Arg:\n fqs (array-like): shape=(NFREQS,), GHz\n the spectral frequencies of the observation to be generated.\n bm_poly (polynomial): default=HERA_BEAM_POLY\n a polynomial fit to the solid-angle beam size of the observation\n as a function of frequency. 
Used to convert temperatures to Jy.\n Returns:\n jy_to_mK (array-like): shape=(NFREQS,)\n a frequency-dependent scalar converting Jy to mK for the provided\n beam size.'''\n \"\"\"\n lam = aipy.const.c / (fqs * 1e9)\n bm = np.polyval(bm_poly, fqs)\n return 1e-23 * lam ** 2 / (2 * aipy.const.k * bm) * 1e3 # XXX make Kelvin in future\n\n\ndef white_noise(size=1):\n \"\"\"\n Produce complex Gaussian white noise with a variance of unity.\n\n Args:\n size (int or tuple, optional):\n shape of output samples.\n\n Returns:\n noise (ndarray): shape=size\n random white noise realization\n\n \"\"\"\n sig = 1.0 / np.sqrt(2)\n return np.random.normal(scale=sig, size=size) + 1j * np.random.normal(\n scale=sig, size=size\n )\n\n\n# XXX reverse fqs and lsts in this function?\ndef resample_Tsky(fqs, lsts, Tsky_mdl=None, Tsky=180.0, mfreq=0.18, index=-2.5):\n \"\"\"\n Re-sample a model of the sky temperature at particular freqs and lsts.\n\n Args:\n fqs (array-like): shape=(NFREQS,), GHz\n the spectral frequencies of the observation to be generated.\n lsts (array-like): shape=(NTIMES,), radians\n local sidereal times of the observation to be generated.\n Tsky_mdl (callable): interpolation object, default=None\n if provided, an interpolation object that returns the sky temperature as a\n function of (lst, freqs). Called as Tsky(lsts,fqs).\n Tsky (float): Kelvin\n if Tsky_mdl not provided, an isotropic sky temperature\n corresponding to the provided mfreq.\n mfreq (float): GHz\n the spectral frequency, in GHz, at which Tsky is specified\n index (float): default=-2.5\n the spectral index used to extrapolate Tsky to other frequencies\n Returns:\n tsky (array-like): shape=(NTIMES,NFREQS)\n sky temperature vs. time and frequency\n \"\"\"\n if Tsky_mdl is not None:\n tsky = Tsky_mdl(lsts, fqs) # support an interpolation object\n else:\n tsky = Tsky * (fqs / mfreq) ** index # default to a scalar\n tsky = np.resize(tsky, (lsts.size, fqs.size))\n return tsky\n\n\n# XXX make inttime default=None\n# XXX reorder fqs/lsts\ndef sky_noise_jy(Tsky, fqs, lsts, bm_poly=HERA_BEAM_POLY, B=None, inttime=10.7):\n \"\"\"\n Generate Gaussian noise (in Jy units) corresponding to a sky temperature\n model integrated for the specified integration time and bandwidth.\n\n Args:\n Tsky (array-like): shape=(NTIMES,NFREQS), K\n the sky temperature at each time/frequency observation\n fqs (array-like): shape=(NFREQS,), GHz\n the spectral frequencies of the observation\n lsts (array-like): shape=(NTIMES,), radians\n local sidereal times of the observation\n bm_poly (polynomial): default=HERA_BEAM_POLY\n a polynomial fit to the solid-angle beam size of the observation\n as a function of frequency. Used to convert temperatures to Jy.\n B (float): default=None, GHz\n the bandwidth used to integrate noise. If not provided,\n defaults to the delta between fqs,\n inttime (float): default=10.7, seconds\n the time used to integrate noise. If not provided, defaults\n to delta between lsts.\n Returns:\n noise (array-like): shape=(NTIMES,NFREQS)\n complex Gaussian noise vs. 
time and frequency\n    \"\"\"\n    if B is None:\n        B = np.average(fqs[1:] - fqs[:-1])\n    B_Hz = B * 1e9 # bandwidth in Hz\n    if inttime is None:\n        inttime = (lsts[1] - lsts[0]) / (2 * np.pi) * aipy.const.sidereal_day\n    # XXX fix below when jy2T changed to Jy/K\n    T2jy = 1e3 / jy2T(fqs, bm_poly=bm_poly) # K to Jy conversion\n    T2jy.shape = (1, -1)\n    Vnoise_jy = T2jy * Tsky / np.sqrt(inttime * B_Hz) # see noise_study.py for discussion of why no factor of 2 here\n    return white_noise(Vnoise_jy.shape) * Vnoise_jy\n\n\ndef thermal_noise(fqs, lsts, Tsky_mdl=None, Trx=0, bm_poly=HERA_BEAM_POLY, inttime=10.7, **kwargs):\n    \"\"\"\n    Create thermal noise visibilities.\n\n    Args:\n        fqs (1d array): frequencies, in GHz.\n        lsts (1d array): times, in rad.\n        Tsky_mdl (callable, optional): a callable model, with signature ``Tsky_mdl(lsts, fqs)``, which returns a 2D\n            array of global beam-averaged sky temperatures (in K) as a function of LST and frequency.\n        Trx (float, optional): receiver temperature, in K.\n        bm_poly (np.poly1d, optional): a polynomial defining the frequency-dependence of the beam size.\n        inttime (float, optional): the integration time, in sec.\n        **kwargs: passed to :func:`resample_Tsky`.\n\n    Returns:\n        2d array size(lsts, fqs): the thermal visibilities [Jy].\n\n    \"\"\"\n    Tsky = resample_Tsky(fqs, lsts, Tsky_mdl=Tsky_mdl, **kwargs)\n    Tsky += Trx\n    return sky_noise_jy(Tsky, fqs, lsts, bm_poly=bm_poly, inttime=inttime)\n","sub_path":"hera_sim/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"7535848","text":"# Finding primes with filter\n'''\nSteps of the sieve of Eratosthenes\n(1) First delete 1 (in modern mathematics, 1 is neither prime nor composite)\n\n(2) Read the current smallest number in the queue, 2, then delete all multiples of 2\n(3) Read the current smallest number in the queue, 3, then delete all multiples of 3\n(4) Read the current smallest number in the queue, 5, then delete all multiples of 5\n(5) Read the current smallest number in the queue, 7, then delete all multiples of 7\n(6) Continue as above until every number in the required range has been deleted or read\n\n'''\n# Build the odd-number sequence starting from 3: a generator, and an infinite sequence.\n\ndef _odd_iter():\n    n = 1\n    while True:\n        n = n + 2\n        yield n\n\n# Then define a sieve function\n\ndef _not_divisible(n):\n    \n    return lambda x: x % n > 0\n\n# Define a generator that keeps returning the next prime\n\ndef primes():\n    yield 2\n    it = _odd_iter() # the initial sequence\n    while True:\n        n = next(it) # return the first number of the sequence\n        yield n\n        it = filter(_not_divisible(n), it) # build the new sequence by filtering the current one\n\nfor n in primes():\n    if n < 100:\n        print(n)\n    else:\n        break\n","sub_path":"filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"360968140","text":"# -*- coding: utf-8 -*-\n\"\"\"\nIsothermal flash routine\n\"\"\"\n\nimport numpy as np\nimport scipy.optimize as opt\nimport warnings\n\nfrom clausius.utils import (MAX_ITER, ERROR_TOL, psatguess, SearchError,\n                            Trcrit, Prcrit)\n\n__all__ = ['isothermflash']\n\n\ndef rachford_rice(q, z, K):\n    return np.dot(z, (1 - K) / (K + q * (1 - K)))\n\n\ndef raoult_isothermalflash(mixture, q0=0.5):\n    \"\"\" Performs isothermal flash using Raoult's Law \"\"\"\n    K = psatguess(mixture.critical['acentric'], mixture.reduced['temperature']) / mixture.reduced['pressure']\n\n    try:\n        quality = opt.newton(rachford_rice, q0, args=(mixture.composition['overall'], K))\n    except RuntimeError:\n        raise SearchError(\"Isothermal flash failed to converge\")\n\n    return quality\n\n\ndef isothermflash(model, mix, temperature, pressure, q0=None):\n    \"\"\" Calculates the quality and phase composition at the given temperature\n    and pressure, uses Raoult's law to estimate the initial mixture quality\n    unless otherwise specified.\n    This algorithm is unstable in the region close to the mixture critical point.\n    model: model that implements the Model interface\n    mixture: instance of Mixture\n    temperature: float in Kelvin\n    pressure: float in Pascals\n    q0: initial guess of mixture quality, unitless\n    \"\"\"\n    mix.temperature = temperature\n    mix.pressure = pressure\n    mix.normalize()\n    mix.composition['vapor'] = mix.composition['overall']\n    mix.composition['liquid'] = mix.composition['overall']\n\n    q0 = raoult_isothermalflash(mix, q0=0.5) if q0 is None else q0\n\n    if np.all(mix.reduced['pressure'] > Prcrit):\n        msg = \"Pressure {} is too high, the flash routine is unstable in this region\".format(pressure)\n        warnings.warn(msg, RuntimeWarning)\n    if np.all(mix.reduced['temperature'] > Trcrit):\n        msg = \"Temperature {} is too high, the flash routine is unstable in this region\".format(temperature)\n        warnings.warn(msg, RuntimeWarning)\n    if (q0 < 0) or (q0 > 1):\n        msg = \"Quality {} is not within [0, 1], flash routine may produce strange results\".format(q0)\n        warnings.warn(msg, RuntimeWarning)\n\n    def loop(q0):\n        z = mix.composition['overall']\n        K = psatguess(mix.critical['acentric'], mix.reduced['temperature']) / mix.reduced['pressure']\n        for count in range(MAX_ITER):\n            mix.composition['liquid'] = z / (K + q0 * (1 - K))\n            lnphiL = model.logfug(mix, 'liquid')\n\n            mix.composition['vapor'] = mix.composition['liquid'] * K\n            lnphiV = model.logfug(mix, 'vapor')\n\n            K = np.exp(lnphiL - lnphiV)\n            q1 = opt.newton(rachford_rice, q0, args=(z, K))\n\n            if np.allclose(q1, q0, atol=ERROR_TOL):\n                return q1\n\n            q0 = q1\n\n        raise SearchError(\"Isothermal flash (q= {}) failed to converge after {} iterations\".format(q0, MAX_ITER))\n\n    mix.quality = loop(q0)\n    return model(mix)\n","sub_path":"clausius/flash/isothermal.py","file_name":"isothermal.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316110075","text":"import numpy as np\nfrom scipy import stats\nfrom scipy.fftpack import fft\n\n\nclass getFeature():\n    def get_all_feature(self, inputs):\n        inputs = np.array(inputs)\n        # minimum\n        min = np.min(inputs)\n        # maximum\n        max = np.max(inputs)\n        # mean\n        mean = np.mean(inputs)\n        # interquartile range\n        iqr = stats.iqr(inputs)\n        # energy measure\n        energy = self.energy(inputs)\n        # FFT transform\n        process = np.abs(fft(inputs)) / len(inputs) / 2\n        # frequency-domain skewness coefficient\n        wskew = stats.skew(process)\n        # frequency-domain kurtosis coefficient\n        wkurtosis = stats.kurtosis(process)\n        # combine all features into one array\n        array = [min, max, mean, iqr, energy, wskew, wkurtosis]\n        return array\n\n    # compute the energy measure\n    def energy(self, inputs):\n        return np.dot(inputs, np.transpose(inputs))/len(inputs)\n","sub_path":"pycharm project/web/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41173532","text":"from tkinter import * #import all the tkinter classes so that we can use them\nfrom tkinter import ttk\n\nwin = Tk() # create a blank window called root\nwin.title(\"Video Rental Application\")\nwin.geometry(\"640x480+100+100\") # width x height + x position + y position\n#win.wm_iconbitmap('Video Rental.ico')\nwin.configure(background=\"#e5e5e5\") #A Shade of Grey\n\nroot = Frame(win,width=400)\nroot.pack(fill=BOTH,expand=TRUE)\n\ndef SAVE():\n    print(\"Working\")\n\n\n\nlabel_1 = Label(root, text=\"Date Rented\", background=\"#e5e5e5\")\nlabel_2 = Label(root, text=\"Date Due\", background=\"#e5e5e5\")\nlabel_3 = Label(root, text=\"Select Video\", background=\"#e5e5e5\")\nlabel_4 = Label(root, 
text=\"Available Copies\", background=\"#e5e5e5\")\nlabel_5 = Label(root, text=\"OverDue\", background=\"#e5e5e5\")\nlabel_6 = Label(root, text=\"Total Amount\", background=\"#e5e5e5\")\nlabel_10 = Label(root, text=\" \", background=\"#e5e5e5\")\nlabel_11 = Label(root, text=\" \", background=\"#e5e5e5\")\nlabel_12 = Label(root, text=\"Name\", background=\"#e5e5e5\")\nlabel_13 = Label(root, text=\"Address\", background=\"#e5e5e5\")\nlabel_14 = Label(root, text=\"Phone Number\", background=\"#e5e5e5\")\nlabel_15 = Label(root, text=\"Customer ID\", background=\"#e5e5e5\")\nlabel_16 = Label(root, text=\"Member\", background=\"#e5e5e5\")\n\nentry_1 = Entry(root)\nentry_2 = Entry(root)\nentry_3 = Entry(root)\nentry_4 = Entry(root)\nentry_5 = Entry(root)\nentry_6 = Entry(root)\n\n# widgets centered by default, sticky option to change\nlabel_1.grid(row=2, column=1, sticky=W) # E is for east or right alignment\nlabel_2.grid(row=4, column=1, sticky=W)\nlabel_3.grid(row=0, column=1, sticky=W)\nlabel_4.grid(row=6, column=1, sticky=W)\nlabel_5.grid(row=8, column=1, sticky=W)\nlabel_6.grid(row=9, column=1, sticky=W)\nlabel_10.grid(row=0, column=2, sticky=W)\nlabel_11.grid(row=0, column=4, sticky=W)\nlabel_12.grid(row=0, column=3, sticky=W)\nlabel_13.grid(row=2, column=3, sticky=W)\nlabel_14.grid(row=4, column=3, sticky=W)\nlabel_15.grid(row=6, column=3, sticky=W)\nlabel_16.grid(row=8, column=3, sticky=W)\nentry_1.grid(row=3, column=1)\nentry_2.grid(row=5, column=1)\nentry_3.grid(row=1, column=3)\nentry_4.grid(row=3, column=3)\nentry_5.grid(row=5, column=3)\nentry_6.grid(row=7, column=3)\n\nc1 = Checkbutton(root)\nc1.grid(row=8, column=1)\n\nc2 = Checkbutton(root)\nc2.grid(row=8, column=3)\n\ndef printValue(event):\n print(value.get())\nvalue = StringVar()\nbox = ttk.Combobox(root, textvariable=value, state='readonly')\nbox['values'] = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10')\nbox.bind('<>',printValue) #when a value is selected, call printValue()\nbox.current(0)\nbox.grid(row=7, column=1, sticky=W)\n\ndef printValue(event):\n print(value.get())\nvalue = StringVar()\nbox = ttk.Combobox(root, textvariable=value, state='readonly')\nbox['values'] = ('Mad Max: Fury Road', 'The Hateful Eight', 'The Revenant', 'Deadpool')\nbox.bind('<>',printValue) #when a value is selected, call printValue()\nbox.current(0)\nbox.grid(row=1, column=1)\n\n# Tkinter puts menus at the top by default\n\n\nbutton = Button(root, text=\"Save\", command=SAVE, background=\"#e5e5e5\")\nbutton.grid(row=1, column=5, sticky=E)\n\nbutton = Button(root, text=\"Exit\", command=root.destroy, background=\"#e5e5e5\")\nbutton.grid(row=3, column=5, sticky=E)\n\n\n\n#exitWithSound\n\ndef updateStatus():\n status.config(text=\"......Saved......\")\n\n# bd is border, relief is type of border\n\nstatusFrame = Frame(win)\nstatusFrame.pack(fill=X)\n\nstatus = Label(statusFrame, text=\"......Idle......\", bd=1, relief=SUNKEN, anchor=W)\nstatus.pack(fill=X,expand=True,side=LEFT)\n\nroot.after(3000,updateStatus) # after 3 seconds update the status\n\nmenu = Menu(win)\nwin.config(menu=menu)\nwin.option_add(\"*tearOff\", False)\nsubMenu = Menu(menu)\n# Adds a drop down when \"File\" is clicked\nmenu.add_cascade(label=\"File\", menu=subMenu)\nsubMenu.add_command(label=\"Save...\", command=SAVE)\nsubMenu.add_separator()\nsubMenu.add_command(label=\"Exit\", command=win.destroy)\n\nwin.mainloop() # make the program run 
forever","sub_path":"student_examples/james.py","file_name":"james.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"170595601","text":"# Contains Duplicate II\n\n# Given an integer array nums and an integer k, return true if there are two\n# distinct indices i and j in the array such that nums[i] == nums[j] and\n# abs(i - j) <= k.\n\n# Example 1:\n\n# Input: nums = [1,2,3,1], k = 3\n# Output: true\n\n# O(n) time, O(n) space.\nclass Solution:\n def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n hashmap = {}\n for index, num in enumerate(nums):\n if num in hashmap and abs(index - hashmap[num]) <= k:\n return True\n hashmap[num] = index\n return False\n\n","sub_path":"easy/contains_duplicate_ii.py","file_name":"contains_duplicate_ii.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"260341488","text":"# image2meat.py\n# Tool that can replace all files in a directory with meatspin\n# author: Antitree\n# Requirements:\n# Imagemagick\n# pip install wand\n\nfrom wand.image import Image\nfrom wand.display import display\nimport requests\nimport shutil\nimport argparse\nimport os\nlistdir = os.listdir\nMEATSPIN = 'meatspin.gif' # path to meatspin.gif\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description='Image Meatspin injector')\n parser.add_argument('-f',\n dest='path',\n help=\"Folder containing images\")\n parser.add_argument('-g',\n dest='gif',\n action=\"store_true\",\n help='Only inject onto gifs to be more subtle')\n parser.add_argument('-d',\n dest='debug',\n action='store_true',\n help=\"Debug output\")\n args = parser.parse_args()\n path = args.path\n if not path:\n path = '.'\n file_check(path)\n if args.gif:\n image_extensions = [\".gif\"]\n else:\n image_extensions = [\".gif\", \".png\", \".jpg\"]\n files = [x for x in listdir(path) if str(x[-4:]) in image_extensions]\n counter = 0\n for file in files:\n file = path + '/' + file\n print(\"'Adjusting' file: %s\" % file)\n with Image(filename=file) as img:\n with Image(filename=MEATSPIN) as meat:\n meat.resize(img.size[0], img.size[1])\n meat.sequence.insert(0,img)\n meat.sequence.insert(len(meat.sequence), img)\n if args.debug:\n meat.save(filename=\"Balls.gif\")\n else:\n meat.save(filename=file)\n counter += 1\n print(\"Injected meatspin into %s images\" % counter)\n\ndef file_check(path):\n # check if meatspin exists\n if os.path.exists(MEATSPIN):\n print(\"Meatspin.gif found! Continuing on\")\n else:\n print(\"Can't find the meatspin...\")\n answer = raw_input(\"Do you want me to download meatspin for you? 
Y/N\")\n if answer == '' or answer.lower() == \"y\":\n r = requests.get('http://vignette3.wikia.nocookie.net/t101medialifestudyguide/images/3/3d/Meatspin.gif/revision/latest?cb=20100304041345', stream=True)\n with open(MEATSPIN, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n\nif __name__ == '__main__':\n main()\n","sub_path":"meatinject.py","file_name":"meatinject.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"249507101","text":"import argparse\nimport os\nimport sys\nimport tensorflow as tf\nimport input_data\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport keras\nimport util\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\ndef trans(dataset,batch_size):\n while 1:\n tmp=dataset.next_batch(batch_size)\n xx=np.array(tmp[0])\n yy=np.array(tmp[1])\n yy = keras.utils.to_categorical(yy,3)\n# print(xx.shape)\n # print(yy.shape)\n yield xx,yy\nos.environ[\"CUDA_VISIBLE_DEVICES\"] =\"0\"\nbatch_size =64\nnum_classes = 3\nepochs =350\ndata_augmentation = True\nnum_predictions = 20\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'keras_kws_trained_model.h5'\n\ntrain,test = input_data.read_data_sets()\n(xxx,yyy)=(test._exampls,test._labels)\n(x_test,y_test)=(np.array(xxx),np.array(yyy))\n#print('x_train shape:', x_train.shape)\n#print(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n#y_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\nmodel = Sequential()\nmodel.add(Dense(128,input_shape=(1640,)))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\nopt = keras.optimizers.SGD(lr=0.001, momentum=0.0)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n#x_train = x_train.astype('float32')\n#x_test = x_test.astype('float32')\nmodel.fit_generator(trans(train,batch_size),\n epochs=epochs,\n validation_data=(x_test, y_test),\n steps_per_epoch=13,\n verbose=2,\n workers=0)\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\nmodel_path = os.path.join(save_dir, model_name)\nmodel.save(model_path)\nprint('Saved trained model at %s ' % model_path)\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n# process in paper\nprobability=model.predict(x_test,verbose=0,batch_size=batch_size)\nconfidence=util.posteriorHandling(probability,test.fbank_end_frame)\n#print(confidence)\nl1,l2=util.do_eval(confidence)\nl1=np.array(l1)\nl2=np.array(l2)\nnp.save(\"l5\",l1)\nnp.save(\"l6\",l2)\n","sub_path":"keras-128.py","file_name":"keras-128.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497317760","text":"__author__ = 'yuandian'\n# -*- coding:utf-8 -*-\nimport os #引用模块\nimport time\ns=[\"Jetty1\",\"我2\"] #定义一个字符串列表\nfn=r\"d:\\nohao.txt\" #把文件路径赋值给变量,便于调用\ndef X():#定义一个函数,便于调用\n a =open(fn,\"w\")#打开文件,权限为写入\n a.write(\"你好,我来了,Jetty\")\n a.close() #关闭文件\n print(\"未找到文件,写入...\")\nif os.path.exists(fn):\n pass\nelse:\n X()\n 
print(\"写入文件\")\n\ndef G():\n a = open(fn,\"w\",encoding=\"utf-8\")\n a.write(\"guoshuai is beatiful\")\n a.close();\n print(\"文件被篡改,并且已经恢复...\")\n\ndef D():\n a=open(fn).read() # 进行读文件的内容,读出来,复制到a变量到去\n return a;\nif os.path.exists(fn):#判断文件是否存在\n Z=D()\n print(Z)\n if Z.find(s[0])!=-1 and Z.find(s[1])!=-1:\n print(\"关键词全部都存在,不做任何操作\")\n else:\n print(Z.find(s[0])) #输出7\n G()\nelse:\n X()\n\n\n\n\n","sub_path":"python_1/pachong/f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"352870956","text":"import logging\r\nfrom binance_f import SubscriptionClient\r\nfrom binance_f.constant.test import *\r\nfrom binance_f.model import *\r\nfrom binance_f.exception.binanceapiexception import BinanceApiException\r\n\r\nfrom binance_f.base.printobject import *\r\n\r\nlogger = logging.getLogger(\"binance-futures\")\r\nlogger.setLevel(level=logging.INFO)\r\nhandler = logging.StreamHandler()\r\nhandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\r\nlogger.addHandler(handler)\r\n\r\nsub_client = SubscriptionClient(api_key=g_api_key, secret_key=g_secret_key)\r\n\r\n\r\ndef callback(data_type: 'SubscribeMessageType', event: 'any'):\r\n if data_type == SubscribeMessageType.RESPONSE:\r\n print(\"Event ID: \", event)\r\n elif data_type == SubscribeMessageType.PAYLOAD:\r\n for item in event:\r\n PrintBasic.print_obj(item)\r\n print(\"\")\r\n # sub_client.unsubscribe_all()\r\n else:\r\n print(\"Unknown Data:\")\r\n print()\r\n\r\n\r\ndef error(e: 'BinanceApiException'):\r\n print(e.error_code + e.error_message)\r\n\r\nsub_client.subscribe_all_miniticker_event(callback, error)","sub_path":"example/websocket/subscribeallminiticker.py","file_name":"subscribeallminiticker.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"327296779","text":"from ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.testbrowser import browsing\nfrom opengever.testing import FunctionalTestCase\nfrom plone import api\nfrom zExceptions import Unauthorized\n\n\nclass TestDossierWorkflow(FunctionalTestCase):\n\n def test_deleting_dossier_is_only_allowed_for_managers(self):\n repository_root, repository = create(Builder('repository_tree'))\n dossier = create(Builder('dossier').within(repository))\n\n acl_users = api.portal.get_tool('acl_users')\n valid_roles = list(acl_users.portal_role_manager.valid_roles())\n valid_roles.remove('Manager')\n self.grant(*valid_roles)\n\n with self.assertRaises(Unauthorized):\n api.content.delete(obj=dossier)\n\n @browsing\n def test_offer_transition_is_hidden_in_action_menu(self, browser):\n self.grant('Manager')\n dossier = create(Builder('dossier').as_expired())\n\n browser.login().open(dossier)\n self.assertEquals(\n [], browser.css('#workflow-transition-dossier-transition-offer'))\n\n @browsing\n def test_archive_transition_is_hidden_in_action_menu(self, browser):\n self.grant('Manager')\n dossier = create(Builder('dossier')\n .in_state('dossier-state-offered'))\n\n browser.login().open(dossier)\n self.assertEquals(\n [], browser.css('#workflow-transition-dossier-transition-archive'))\n","sub_path":"opengever/dossier/tests/test_dossier_workflow.py","file_name":"test_dossier_workflow.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"37139169","text":"# Copyright 2018 Iguazio\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mlrun\nfrom mlrun.runtimes import RuntimeKinds\n\nfrom .model import DataTarget\nfrom .sources import get_source_step\nfrom .targets import add_target_states\nfrom mlrun.datastore.store_resources import ResourceCache\nfrom mlrun.serving.server import create_graph_server\nfrom ..runtimes.function_reference import FunctionReference\n\n\ndef init_featureset_graph(source, featureset, namespace, targets=None, return_df=True):\n \"\"\"create storey ingestion graph/DAG from feature set object\"\"\"\n\n cache = ResourceCache()\n graph = featureset.spec.graph.copy()\n\n # init targets (and table)\n targets = targets or []\n _add_data_states(\n graph, cache, featureset, targets=targets, source=source, return_df=return_df,\n )\n\n server = create_graph_server(graph=graph, parameters={})\n server.init(None, namespace, cache)\n return graph\n\n\ndef featureset_initializer(server):\n \"\"\"graph server hook to initialize feature set ingestion graph/DAG\"\"\"\n\n context = server.context\n cache = server.resource_cache\n\n featureset_uri = context.get_param(\"featureset\")\n source = context.get_param(\"source\")\n featureset = context.get_store_resource(featureset_uri)\n targets = context.get_param(\"targets\", None)\n if targets:\n targets = [DataTarget.from_dict(target) for target in targets]\n else:\n targets = featureset.spec.targets\n\n graph = featureset.spec.graph.copy()\n _add_data_states(\n graph, cache, featureset, targets=targets, source=source,\n )\n server.graph = graph\n\n\ndef _add_data_states(\n graph, cache, featureset, targets, source, return_df=False,\n):\n _, default_final_state, _ = graph.check_and_process_graph(allow_empty=True)\n cache.cache_resource(featureset.uri, featureset, True)\n table = add_target_states(\n graph, featureset, targets, to_df=return_df, final_state=default_final_state\n )\n if table:\n cache.cache_table(featureset.uri, table, True)\n\n entity_columns = list(featureset.spec.entities.keys())\n key_column = entity_columns[0] if entity_columns else None\n if source is not None:\n source = get_source_step(\n source, key_column=key_column, time_column=featureset.spec.timestamp_key,\n )\n graph.set_flow_source(source)\n\n\ndef deploy_ingestion_function(\n name, source, featureset, parameters, function_ref=None, local=False, watch=True\n):\n name = name or f\"{featureset.metadata.name}_ingest\"\n function_ref = function_ref or featureset.spec.function.copy()\n if not function_ref.to_dict():\n runtime_kind = RuntimeKinds.serving if source.online else RuntimeKinds.job\n function_ref = FunctionReference(name=name, kind=runtime_kind)\n if not function_ref.kind:\n raise mlrun.errors.MLRunInvalidArgumentError(\n f\"function reference is missing kind {function_ref}\"\n )\n\n if function_ref.kind == RuntimeKinds.serving:\n function_ref.code = function_ref.code or \"\"\n elif function_ref.kind == RuntimeKinds.spark:\n function_ref.code = function_ref.code or 
_default_spark_handler\n # todo: use spark specific image\n else:\n function_ref.code = function_ref.code or _default_job_handler\n\n function_ref.image = function_ref.image or \"mlrun/mlrun\"\n function = function_ref.to_function()\n function.metadata.project = featureset.metadata.project\n\n if function.kind == RuntimeKinds.serving:\n # add triggers\n function.spec.parameters = parameters\n function.spec.graph_initializer = (\n \"mlrun.feature_store.ingestion.featureset_initializer\"\n )\n function.verbose = True\n if local:\n return function.to_mock_server()\n else:\n function.deploy()\n else:\n return function.run(\n params=parameters, schedule=source.schedule, local=local, watch=watch\n )\n\n\n_default_job_handler = \"\"\"\nimport mlrun\ndef handler(context):\n verbose = context.get_param('verbose', True)\n server = mlrun.serving.create_graph_server(parameters=context.parameters, verbose=verbose)\n server.graph_initializer = \"mlrun.feature_store.ingestion.featureset_initializer\"\n server.init(None, globals())\n server.wait_for_completion()\n\"\"\"\n\n\n_default_spark_handler = \"\"\"\nimport mlrun\ndef handler(context):\n pass\n # todo: call to spark ingestion handler\n\"\"\"\n","sub_path":"mlrun/feature_store/ingestion.py","file_name":"ingestion.py","file_ext":"py","file_size_in_byte":4934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"545098958","text":"#!/usr/bin/env python\n\nimport re\nfrom glob import glob\nimport os\nimport sys\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\ndef get_cmap(n, name='hsv'):\n '''Returns a function that maps each index in 0, 1, ..., n-1 to a distinct \n RGB color; the keyword argument name must be a standard mpl colormap name.\n '''\n cap = plt.cm.get_cmap(name, n+1)\n return [cap(i) for i in range(n)]\n\ndef PlotQC(path, xlb, outpath):\n paths = [os.path.join(path, 'QC1_table.tab'),\\\n os.path.join(path, 'QC2_table.tab')]\n label = ['ForwardRead', 'ReverseRead']\n list_np = []\n medians = []\n fig, axes = plt.subplots()\n for path in paths:\n np_tmp = np.loadtxt(path, dtype='S10')\n np_in = np_tmp[1:10000, 1].astype('float')\n list_np.append(np_in)\n medians.append(np.median(np_in))\n vplot = axes.violinplot(list_np, showmeans=False,\\\n showmedians=False, showextrema=False, widths=0.2)\n bplot = axes.boxplot(list_np, vert=True, patch_artist=True,\\\n showfliers=False, widths=0.03, medianprops={'linestyle': 'None'})\n inds = np.arange(1, len(medians)+1)\n axes.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)\n for patch in bplot['boxes']:\n patch.set_facecolor('black')\n for patch, color in zip(vplot['bodies'], get_cmap(len(label))):\n patch.set_color(color)\n axes.set_xticks([y+1 for y in range(len(label))], )\n axes.set_xlabel(xlb)\n axes.set_ylabel('Value')\n axes.set_xticklabels(label)\n axes.spines['right'].set_visible(False)\n axes.spines['top'].set_visible(False)\n axes.set_title('Distribution of Reads Quality')\n plt.savefig(os.path.join(outpath, 'ReadsQualityOf{}.png'.format(xlb)))\n\ndef main():\n PlotQC(sys.argv[1], sys.argv[2], sys.argv[3])\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"bin/Race/Statistic/QcPlot.py","file_name":"QcPlot.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"378304022","text":"\n\n#calss header\nclass _PORN():\n\tdef 
__init__(self,): \n\t\tself.name = \"PORN\"\n\t\tself.definitions = [u'informal for pornography : ', u'pictures, books, television programmes , newspaper articles, etc. that are intended to be exciting for people interested in a particular subject or particular product: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_porn.py","file_name":"_porn.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"627181583","text":"def longestLength(a):\n\tmax1 = len(a[0])\n\ttemp = a[0]\n\n\tfor i in a:\n\t\tif(len(i) > max1):\n\n\t\t\tmax1 = len(i)\n\t\t\ttemp = i\n\n\tprint(\"The word with the longest length is:\", temp,\n\t\t\" and length is \", max1)\n\n\na = [\"apple\", \"savan\", \"name\", \"game\"]\nlongestLength(a)\n","sub_path":"savan.py","file_name":"savan.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"570380137","text":"import sys\n\nfrom bioblend import galaxy\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--tag',\n help='Tag'\n )\n parser.add_argument(\n '-g', '--galaxy-url', required=True,\n help='URL of the Galaxy instance to run query against'\n )\n parser.add_argument(\n '-a', '--api-key', required=True,\n help='API key to use for authenticating on the Galaxy server'\n )\n args = parser.parse_args()\n\n gi = galaxy.GalaxyInstance(args.galaxy_url, args.api_key)\n sys.stdout.write(gi.histories._get(params={'q': ['tag'], 'qv': [args.tag]})[0]['id'])\n","sub_path":"bioblend-scripts/get_most_recent_history_by_tag.py","file_name":"get_most_recent_history_by_tag.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"568046006","text":"import math\nimport time\n\n\ndef find(sequence, min_tetrads=2, min_score=17, log=False):\n raw_g4s = list()\n\n before = time.time()\n cands = seedQ(sequence, min_tetrads)\n while len(cands) > 0:\n cand = cands.pop()\n if cand.complete():\n if cand.viable(min_score):\n raw_g4s.append(makeG4(cand))\n else:\n expanded = cand.expand()\n for c in expanded:\n cands.append(c)\n\n fams = list()\n for g in raw_g4s:\n newfam = True\n for f in fams:\n if belongsin(g, f):\n f.append(g)\n newfam = False\n if newfam:\n f = list()\n f.append(g)\n fams.append(f)\n\n g4s = list()\n for fam in fams:\n best = select_best(fam)\n fam.remove(best)\n #best['overlaps'] = fam\n g4s.append(best)\n return g4s\n\n\ndef select_best(family):\n highest = 0\n best = None\n for g4 in family:\n if g4['gscore'] > highest:\n highest = g4['gscore']\n best = g4\n\n return best\n\n\ndef belongsin(g4, family):\n for member in family:\n if overlapped(g4, member):\n return True\n\n return False\n\n\ndef overlapped(a, b):\n a_start = a['start']\n a_end = a_start + a['length']\n\n b_start = b['start']\n b_end = b_start + b['length']\n\n if a_start >= b_start and a_start <= b_end:\n return True\n if a_end >= b_start and a_end <= b_end:\n return True\n if b_start >= a_start and b_start <= a_end:\n return True\n if b_end >= a_start and b_end <= a_end:\n return True\n return False\n\n\ndef seedQ(sequence, min_tetrads):\n g = min_tetrads\n starts = list()\n cands = 
list()\n done = False\n\n while not done:\n starts = getStartingPoints(sequence, g)\n for start in starts:\n cands.append(Cand(sequence, g, start))\n g += 1\n done = len(starts) is 0\n\n return cands\n\n\ndef getStartingPoints(sequence, g):\n tstring = \"G\" * g\n p = 0\n done = False\n starts = list()\n\n while not done:\n p = sequence.find(tstring, p)\n if p >= 0:\n starts.append(p)\n else:\n done = True\n p += 1\n\n return starts\n\n\ndef maximumLength(numTetrads):\n if numTetrads < 3:\n return 30\n else:\n return 45\n\n\ndef makeG4(candidate):\n g4 = dict()\n g4['start'] = candidate.start\n g4['tetrads'] = candidate.numTetrads\n g4['tetrad1'] = candidate.t1()\n g4['tetrad2'] = candidate.t2()\n g4['tetrad3'] = candidate.t3()\n g4['tetrad4'] = candidate.t4()\n g4['y1'] = candidate.y1\n g4['y2'] = candidate.y2\n g4['y3'] = candidate.y3\n g4['length'] = candidate.length()\n g4['gscore'] = candidate.score()\n g4['sequence'] = candidate.sequence[candidate.start:candidate.start + candidate.length()]\n return g4\n\n\nclass Cand:\n def __init__(self, sequence, tetrads, start_pos):\n self.y1 = -1\n self.y2 = -1\n self.y3 = -1\n self.sequence = sequence\n self.numTetrads = tetrads\n self.start = start_pos\n self.tstring = \"G\" * self.numTetrads\n self.maxLength = maximumLength(tetrads)\n\n def score(self):\n # assert self.complete(), \"Can't compute g-score for incomplete G4 candidate\"\n\n gavg = (math.fabs(self.y1 - self.y2) + math.fabs(self.y2 -\n self.y3) + math.fabs(self.y1 - self.y3)) / 3.0\n\n return math.floor(self.gmax() - gavg + self.gmax() * (self.numTetrads - 2))\n\n def gmax(self):\n return self.maxLength - (self.numTetrads * 4 + 1)\n\n def length(self):\n # assert self.complete(), \"Can't compute length of incomplete G4 candidate\"\n return 4 * self.numTetrads + self.y1 + self.y2 + self.y3\n\n def expand(self):\n # assert not self.complete(), \"Cannot expand complete G4 motif\"\n cands = list()\n ys = self.findLoopLengthsFrom(self.cursor())\n for y in ys:\n cand = Cand(self.sequence, self.numTetrads, self.start)\n cand.y1 = self.y1\n cand.y2 = self.y2\n cand.y3 = self.y3\n if self.y1 < 0:\n cand.y1 = y\n elif self.y2 < 0:\n cand.y2 = y\n elif self.y3 < 0:\n cand.y3 = y\n\n if cand.partialLength() <= cand.maxLength:\n cands.append(cand)\n\n return cands\n\n def asString(self):\n if self.complete():\n return \"[\" + str(self.numTetrads) + \"]: \" + self.sequence[0:self.start] + \"[\" + self.sequence[self.start:self.start + self.length()] + \"]\" + self.sequence[self.start + self.length():] + \" -> \" + str(self.score())\n else:\n return \"[\" + str(self.numTetrads) + \"]: \" + self.sequence[0:self.start] + \"[\" + self.sequence[self.start:self.cursor()] + \"*\"\n\n def t1(self):\n return self.start\n\n def t2(self):\n # assert self.y1 >= 0, \"Tetrad 2 position can't be computed until loop 1 is found\"\n return self.t1() + self.numTetrads + self.y1\n\n def t3(self):\n # assert self.y2 >= 0, \"Tetrad 3 position can't be computed until loop 2 is found\"\n return self.t2() + self.numTetrads + self.y2\n\n def t4(self):\n # assert self.y3 >= 0, \"Tetrad 4 position can't be computed until loop 3 is found\"\n return self.t3() + self.numTetrads + self.y3\n\n def cursor(self):\n # assert not self.complete(), \"Cursor cannot be computed on a complete G4\"\n\n if self.y1 < 0:\n return self.t1() + self.numTetrads\n elif self.y2 < 0:\n return self.t2() + self.numTetrads\n elif self.y3 < 0:\n return self.t3() + self.numTetrads\n\n def partialLength(self):\n length = self.numTetrads * 4\n\n # add 
the minimum loops left\n if self.y1 >= 0 and self.y2 < 0:\n # only first loop is known\n if self.y1 == 0:\n # other two must be at least 2\n length += 2\n else:\n length += 1\n\n elif self.y2 >= 0 and self.y3 < 0:\n # first two loop lengths are known\n if self.y1 == 0 or self.y2 == 0:\n length += 1\n # if neither y1 or y2 are zero, the next loop could be 0\n\n # add the current loops\n if self.y1 > 0:\n length += self.y1\n if self.y2 > 0:\n length += self.y2\n if self.y3 > 0:\n length += self.y3\n\n return length\n\n def findLoopLengthsFrom(self, i):\n ys = list()\n p = i\n done = False\n while not done:\n p = self.sequence.find(\n self.tstring, p, self.start + self.maxLength + 1)\n if p >= 0:\n y = p - i\n if y >= self.minAcceptableLoopLength() and (p - self.start + len(self.tstring) - 1) < self.maxLength:\n ys.append(y)\n else:\n done = True\n else:\n done = True\n p += 1\n\n return ys\n\n def minAcceptableLoopLength(self):\n if self.y1 == 0 or self.y2 == 0 or self.y3 == 0:\n return 1\n else:\n return 0\n\n def complete(self):\n if self.y1 < 0 or self.y2 < 0 or self.y3 < 0:\n return False\n else:\n return True\n\n def viable(self, min_score):\n if self.score() < min_score:\n return False\n if self.length() > self.maxLength:\n return False\n\n # only one loop is allowed to have a 0 length\n count = 0\n if self.y1 < 1:\n count += 1\n if self.y2 < 1:\n count += 1\n if self.y3 < 1:\n count += 1\n return count < 2\n","sub_path":"seed/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":7773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504246764","text":"#!-*- coding:utf-8 -*-\nimport sys\nimport os\n# import PyQt4 QtCore and QtGui modules\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5 import uic\nfrom pylinac import WinstonLutz\nfrom PyQt5 import QtCore\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.tests.results.results_arma import current_path\n\nfrom Window import Ui_MainWindow\n\n(Ui_MainWindow, QWindow) = uic.loadUiType('Window.ui')\n\nGANTRY = 'Gantry'\nCOLLIMATOR = 'Collimator'\nCOUCH = 'Couch'\n\nclass DirectoryPath(object):\n def __init__(self, pathDir,getCountImages):\n self._pathDir = pathDir\n self._getCountImages=getCountImages\n\n @property\n def pathDir(self):\n return getattr(self, '_pathDir')\n\n @pathDir.setter\n def pathDir(self, pathDir):\n self._pathDir = pathDir\n\n @property\n def getCountImages(self):\n return getattr(self,'_getCountImages')\n\n @getCountImages.setter\n def getCountImages(self,getCountImages):\n self._getCountImages=getCountImages\n\nclass MainWindow(QWindow):\n \"\"\"MainWindow inherits QMainWindow\"\"\"\n\n # d = DirectoryPath(pathDir=None)\n def __init__(self, parent=None):\n super(QWindow, self).__init__(parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n def __del__(self):\n self.ui = None\n\n def showdialog(self):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n\n msg.setText(\"End of the Images\")\n msg.setInformativeText(\"This is additional information\")\n msg.setWindowTitle(\"MessageBox demo\")\n msg.setDetailedText(\"The details are as follows:\")\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n\n msg.exec_()\n\n def changeImages(self):\n\n num = int(self.ui.spinBox.text())\n direct_tory = d.pathDir\n wl = WinstonLutz(direct_tory)\n if num < d.getCountImages:\n wl.images[num].plot()\n else:\n self.showdialog()\n def summeryCone(self):\n wl = WinstonLutz(d.pathDir)\n wl.plot_coneSummery()\n 
wl.publishCone_pdf(d.pathDir+'//resultCone.pdf')\n\n\n def getIzoSize(self):\n wl = WinstonLutz(d.pathDir)\n self.ui.label_CouchZize.setText(str('{0:.2f}'.format(wl.couch_iso_size)))\n self.ui.label_GentrySize.setText(str(wl.collimator_iso_size))\n self.ui.label_GentryIZOSIZE.setText(str('{0:.2f}'.format(wl.gantry_iso_size)))\n\n def GetConeIzoSize(self):\n wl = WinstonLutz(d.pathDir)\n self.ui.label_CouchZize.setText(str('{0:.2f}'.format(wl.couch_iso_size)))\n self.ui.label_GentryIZOSIZE.setText(str('{0:.2f}'.format(wl.gantry_iso_size)))\n\n def getEpidSag(self):\n wl = WinstonLutz(d.pathDir)\n wl.plot_epid_sag()\n\n def GentrySag(self):\n wl = WinstonLutz(d.pathDir)\n wl.plot_gantry_sag()\n\n\n def slot1(self):\n file = open(\"file.txt\", \"w\")\n directory = QFileDialog.getExistingDirectory(self, 'Select backup directory')\n win_directory = QtCore.QDir.toNativeSeparators(directory)\n wl = WinstonLutz(win_directory)\n d.getCountImages=len(wl.images)\n d.pathDir = win_directory\n wl.images[0].plot()\n self.ui.pushButton__ConeOpen.setEnabled(False)\n self.ui.pushButton__SummaryCone.setEnabled(False)\n self.ui.pushButton__ConeIzoSize.setEnabled(False)\n\n a=wl.results()\n file.write(str(a))\n file.close()\n\n def OpenCOne(self):\n directory = QFileDialog.getExistingDirectory(self, 'Select backup directory')\n win_directory = QtCore.QDir.toNativeSeparators(directory)\n wl = WinstonLutz(win_directory)\n d.getCountImages = len(wl.images)\n d.pathDir = win_directory\n wl.images[0].plot()\n self.ui.pushButton.setEnabled(False)\n self.ui.pushButton__Summary.setEnabled(False)\n self.ui.pushButton__IzoSize.setEnabled(False)\n file = open(\"file.txt\", \"w\")\n file.write(str(wl.result_cone()))\n file.close()\n\n def sumary_Info(self):\n wl = WinstonLutz(d.pathDir)\n wl.plot_summary()\n wl.publish_pdf(d.pathDir+'//result.pdf')\n\n\n# -----------------------------------------------------#\nif __name__ == '__main__':\n # create application\n app = QApplication(sys.argv)\n app.setApplicationName('untitled2')\n\n d = DirectoryPath(pathDir=\"\",getCountImages=0)\n\n # create widget\n w = MainWindow()\n w.setWindowTitle('untitled2')\n w.show()\n\n # connection\n # QObject.connect( app, SIGNAL( 'lastWindowClosed()' ), app, SLOT( 'quit()' ) )\n\n # execute application\n sys.exit(app.exec_())\n","sub_path":"WinstonLutsTest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643005140","text":"import os\nimport argparse\nimport copy\nimport math\nimport json\nfrom time import time\nimport logging, pickle\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils import clip_grad_norm_\nimport codecs\n\n\nclass net(nn.Module):\n def __init__(self, args):\n super(net, self).__init__()\n self.args = args\n vocab_size = args.vocab_size\n h_model= args.h_model\n d_model = args.d_model\n #word_dim gives the size of char embeddings\n char_dim = args.char_len\n # hidden_dim gives the dimension of the number of hidden layers in char,word and sentence GRUs\n hidden_dim = args.hidden_dim\n #doc_len is the maximum number of sentences; sent_len is the maximum numebr of words in a sentence\n doc_len = args.doc_len\n sent_len = args.sent_len\n word_len=args.word_len\n\n\n # Define the Embedding layer for the chars\n self.emb_layer = nn.Embedding(vocab_size, char_dim)\n \n 
self.char_conv=[nn.Conv2d(1,256,[i,15]) for i in (7,7,3,3,3,3)] \n self.char_pool=[nn.MaxPool2d([22+i,1]) for i in (0,0,4,4,4,4)]\n # Pass the char Embeddings through the GRU\n self.char_GRU = nn.GRU(char_dim, hidden_dim, 1, bidirectional=True, batch_first=True)\n \n \n \n #define layers for words\n self.word_conv=[nn.Conv2d(1,256,[i,15]) for i in (7,7,3,3,3,3)] \n self.word_pool=[nn.MaxPool2d([22+i,1]) for i in (0,0,4,4,4,4)]\n # Pass the word Embeddings through the GRU\n self.word_GRU = nn.GRU(word_dim, hidden_dim, 1, bidirectional=True, batch_first=True)\n\n \n \n #define layers for sentences\n self.word_conv=[nn.Conv2d(1,256,[i,15]) for i in (7,7,3,3,3,3)] \n self.sent_pool=[nn.MaxPool2d([22+i,1]) for i in (0,0,4,4,4,4)]\n # Pass the word Embeddings through the GRU\n self.sent_GRU = nn.GRU(d_model, hidden_dim, 1, bidirectional=True, batch_first=True)\n\n \n # Compute the abstract features\n self.doc_linear = nn.Linear(d_model, d_model)\n \n\n\n def forward(self, x):\n # ndocs is the batch size, i.e., number of documents in a batch\n ndocs = x.size(0)\n doc_len = x.size(1)\n sent_len = x.size(2)\n word_len= x.size(3)\n\n # x will have shape (ndocs, doc_len, sent_len,word_len)\n\n # Get the embeddings of the words; embeddings will be of shape (ndocs, doc_len, sent_len, word_len, emb_dim)\n x = self.emb_layer(x)\n char_dim = x.size(-1)\n x = x.reshape((-1,1, word_len,char_dim))\n #x = char_conv(x)\n #x = char_pool(x).squeeze()\n #print('hereee',x.shape)\n# print(type(x))\n x = [self.char_pool[j](self.char_conv[j](x)).squeeze() for j in range(6)]#char_rep\n \n print('hereee',x.shape)\n x = torch.cat(x,dim=0)\n \n x = x.reshape(-1, doc_len, d_model)\n x, _ = self.char_GRU(x)\n \n\n x = self.char_pool(x.reshape(-1,1,1, word_len))\n #Pass through the word GRU. It expects input in the form (batch, seq_len, input_size)\n # x = word_conv(x)\n #x = word_pool_pool(x).squeeze()\n x = [self.word_pool[j](self.word_conv[j](x)).squeeze() for j in range(len(word_pool))]#word_rep\n x = torch.cat(x,dim=0)\n x = x.reshape(-1, doc_len, d_model)\n x,_ = self.word_GRU(x)\n x = self.word_pool(x.reshape(-1,1,sent_len, 1))\n #Pass through the sentence GRU. 
It expects input in the form (batch, seq_len, input_size)\n #x = sent_conv(x)\n #x = sent_pool(x).squeeze()\n x = [self.sent_pool[j](self.sent_conv[j](x)).squeeze() for j in range(len(sent_pool))]#sent_rep\n x = torch.cat(x,dim=0)\n x = x.reshape(-1, doc_len, d_model)\n x,_ = self.sent_GRU(x)\n \n #Average pool to get Document representation\n doc_rep = self.sent_pool(x.reshape(-1,1,doc_len, 1))\n \n #Pass the doc_rep through a linear layer and tanh non-linearity\n doc_rep = torch.softmax(self.doc_linear(doc_rep))\n \n\n\n\ndef getDocumentVector(path, doc_len, sent_len, word_len, char2id):\n X=np.zeros((batch_size,Nsent,Nword,Nchar), dtype=np.long)\n with open(path, encoding='utf-8')as f:\n doc = f.read().split('\\n')[:doc_len]\n #print('doc',doc)\n for sent_no, line in enumerate(doc):\n words = line.split(' ')[:sent_len]\n #print('words',words)\n for word_no, word in enumerate(words):\n tokens=[i for i in word][:word_len]\n #print('tokens',tokens)\n for char_no,char in enumerate(tokens):\n #print('here ',sent_no,word_no,char_no)\n try:\n X[sent_no,word_no,char_no] = char2id[char]\n except KeyError:\n X[sent_no,word_no,char_no] = char2id['UNK']\n \n return X\n\n\n\nimport torch\nfrom torch.utils import data\n\nclass Dataset(data.Dataset):\n 'Characterizes a dataset for PyTorch'\n def __init__(self, list_IDs, labels, path, doc_len, sent_len, word_len, char2id):\n 'Initialization'\n self.labels = labels\n self.list_IDs = list_IDs\n self.path = path\n self.doc_len = doc_len\n self.sent_len = sent_len\n self.word_len = word_len\n self.char2id = char2id\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.list_IDs)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n ID = self.list_IDs[index]\n\n # Load data and get label\n path = os.path.join(self.path, ID)\n X = getDocumentVector(path, self.doc_len, self.sent_len, self.word_len, self.char2id)\n y = self.labels[ID]\n y = y[:doc_len]\n y.extend([0 for _ in range(len(y),doc_len)])\n y = list(map(float, y))\n y = np.array(y)\n\n return X, y\n\n\nimport json\nwith open('/home/shakeel/cnn2/char2id.json') as f:\n char2id = json.load(f)\n\n\n\nclass arguments():\n def __init__(self, **kwargs):\n self.vocab_size = kwargs['vocab_size']\n self.char_len = kwargs['char_len']\n self.hidden_dim = kwargs['hidden_dim']\n self.sent_len = kwargs['sent_len']\n self.word_len = kwargs['word_len']\n self.device = kwargs['device']\n self.ndocs = kwargs['ndocs']\n self.h_model=kwargs['h_model']\n self.d_model=kwargs['d_model']\n self.doc_len = kwargs['doc_len']\n\nvocab_size, char_len = 63,10\nword_len = 50\nsent_len =100\nword_len=15\ndoc_len=50\nndocs = 16\nh_model, d_model = 8, 512\nhidden_dim = 256\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\nprint(device)\nargs_dict = {\n 'vocab_size':vocab_size,\n 'char_len':char_len,\n 'ndocs':ndocs,\n 'doc_len':doc_len,\n 'word_len':word_len,\n 'sent_len':sent_len,\n 'hidden_dim':hidden_dim,\n 'device':device,\n 'h_model':h_model,\n 'd_model':d_model,\n}\nargs = arguments(**args_dict)\n\n\nimport pickle\nwith open('/home/shakeel/cnn2/args.json','wb') as f:\n pickle.dump(args, f)\nwith open('/home/shakeel/cnn2/args_dict.json','wb') as f:\n pickle.dump(args_dict, f)\n\n\ndef eval_network(network, data_iter, criterion, device):\n network.eval()\n total_loss = 0\n batch_num = 0\n accuracy = 0.0\n# iteration=3\n for features, targets in data_iter:\n features,targets = Variable(features.type(torch.long)).to(device), 
Variable(targets.float()).to(device)\n probs = network(features)\n loss = criterion(probs,targets)\n y_pred = np.round(probs.clone().detach().numpy())\n y_true = targets.clone().detach().numpy()\n# print(y_true.shape, y_pred.shape, probs)\n accuracy += accuracy_score(y_true,y_pred )\n total_loss += loss\n batch_num += 1\n# #PLEASE REMOVE THE NEXT LINE\n# iteration -= 1\n# if iteration == 1: break\n loss = total_loss / batch_num\n accuracy /= batch_num\n network.train()\n return loss, accuracy\n\n\ndef train(args, epochs=1, previous_model=False):\n \n with open('/home/shakeel/cnn/partition.json',encoding='utf-8') as f:\n partition = json.load(f)\n with open('/home/shakeel/cnn/labels.json',encoding='utf-8') as f:\n labels = json.load(f)\n \n \n #Step3: Create the train and validation datasets\n train_dataset = Dataset(partition['train'], labels['train'], '/home/shakeel/cnn/data/train', \n args.doc_len, args.sent_len, args.word_len, char2id)\n valid_dataset = Dataset(partition['valid'], labels['valid'], '/home/shakeel/cnn/data/valid/', \n args.doc_len, args.sent_len, args.word_len, char2id)\n \n #Step 4: Create the dataloaders\n train_dataloader = data.DataLoader(train_dataset, args.ndocs, shuffle=True)\n valid_dataloader = data.DataLoader(valid_dataset, args.ndocs, shuffle=False)\n \n # Step 5: Create the network\n \n network = net(args).to(args.device)\n\n # Step 6: Loss function\n criterion = nn.BCELoss()\n \n # Step 7: model info\n print(network)\n \n params = sum(p.numel() for p in list(network.parameters())) / 1e6\n print('#Params: %.1fM' % (params))\n \n #Step 8: Create the optimizer\n optimizer = torch.optim.Adam(network.parameters(),lr=1e-3)\n \n \n # Step 9: Early Stopping details\n min_val_loss = float('inf')\n n_epochs_stop = 5\n epochs_no_improve = 0\n network.cuda()\n network.train()\n type(type(network))\n \n t1 = time()\n #Step 10: Start traing for epochs\n for epoch_id in range(epochs):\n for i,(x,y) in enumerate(train_dataloader):\n print(args.device)\n x = Variable(x.type(torch.long)).to(device)\n y = Variable(y.float()).to(device)\n print(type(x), type(y))\n probs = network(x).to(device)\n loss = criterion(probs,y)\n optimizer.zero_grad()\n loss.backward()\n clip_grad_norm_(network.parameters(), 1.0)\n optimizer.step()\n if i%10==0:\n print('Epoch %d, Batch ID:%d Loss:%f' %(epoch_id, i,loss))\n \n val_loss, acc = eval_network(network, valid_dataloader, criterion, args.device)\n print('Epoch %d Loss:%f Accuracy:%f' %(epoch_id,loss, acc))\n # If the validation loss is at a minimum\n if val_loss < min_val_loss:\n # Save the model\n torch.save({\n 'model_state_dict': network.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, '/home/shakeel/cnn/checkpoints/best_model.pt')\n# torch.save(network, './checkpoints/best_model.pt')\n epochs_no_improve = 0\n min_val_loss = val_loss\n else:\n epochs_no_improve += 1\n # Check early stopping condition\n if epochs_no_improve == n_epochs_stop:\n print('Early stopping!')\n # Load in the best model\n network = torch.load('/home/shakeel/cnn/checkpoints/best_model.pt')\n\n t2 = time()\n logging.info('Total Cost:%f h'%((t2-t1)/3600))\n\n\n\nbatch_size = 64 # No. 
of documents in a batch\nNsent=100\nNword=50\nNchar=15\ntorch.cuda.set_device(1)\ntrain(args, True)\n","sub_path":"2g2g2g.py","file_name":"2g2g2g.py","file_ext":"py","file_size_in_byte":11504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"615857786","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom crawler import Crawler\n\n\nclass CGoogle(Crawler):\n def __init__(self):\n Crawler.__init__(self)\n self.url = 'http://www.google.com/search'\n self.params = {\"tbs\": \"li:1\"}\n\n def get_count(self, phrase):\n self.params['q'] = \"'\" + phrase + \"'\"\n req = requests.get(self.url, self.params)\n soup = BeautifulSoup(req.text, \"html.parser\")\n try:\n res = soup.find('div', {'id': 'b_tween'}).text\n print(res)\n except AttributeError:\n print(\"Bing Problem\")\n return 0\n words = res.split(\" \")\n for word in words:\n if re.match(r'^[0-9.,]*$', word):\n return word.replace(',', '').replace('.', '')\n return 0\n\n","sub_path":"mycrawler/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"447550763","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Person, Phone, Email\nfrom .forms import PersonModelForm, PhoneModelForm, EmailModelForm\n\n# Show all contacts\ndef contact_list_view(request):\n # Changing None to empty string is just for estetic view while viewing contacts.\n\n contacts = Person.objects.all() \n for contact in contacts:\n try:\n contact.phone = contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n\n context = {\"contacts\": contacts}\n return render(request, \"contacts/list.html\", context)\n\n\n# Show specific contact\ndef contact_detail_view(request, id):\n contact = Person.objects.get(pk=id)\n try:\n contact.phone = contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n\n context = {\"contact\": contact}\n return render(request, \"contacts/details.html\", context)\n\n\n# Add new contact\ndef contact_add_view(request):\n if request.method == 'POST':\n person_form = PersonModelForm(request.POST or None)\n phone_form = PhoneModelForm(request.POST or None)\n email_form = EmailModelForm(request.POST or None)\n\n if person_form.is_valid() and phone_form.is_valid() and email_form.is_valid():\n person = Person.objects.create(**person_form.cleaned_data)\n phone = Phone.objects.create(person=person, phone=phone_form.cleaned_data['phone'])\n email = Email.objects.create(person=person, email=email_form.cleaned_data['email'])\n \n person_form = PersonModelForm()\n phone_form = PhoneModelForm()\n email_form = EmailModelForm()\n return redirect('contact-list')\n else:\n person_form = PersonModelForm()\n phone_form = PhoneModelForm()\n email_form = EmailModelForm()\n\n context = {'person_form': person_form, 'phone_form': phone_form, 'email_form': email_form}\n 
return render(request, \"contacts/create.html\", context)\n\n\n# Edit contact\ndef contact_update_view(request, id):\n contact = Person.objects.get(pk=id)\n if Phone.objects.filter(person=contact):\n contact_phone = Phone.objects.get(person=contact)\n else:\n contact_phone = Phone(person = contact, phone=None)\n if Email.objects.filter(person=contact):\n contact_email = Email.objects.get(person=contact)\n else:\n contact_email = Email(person = contact, email=None)\n\n if request.method == 'POST':\n person_form = PersonModelForm(request.POST or None, instance=contact)\n phone_form = PhoneModelForm(request.POST or None, instance=contact_phone)\n email_form = EmailModelForm(request.POST or None, instance=contact_email)\n\n if person_form.is_valid() and phone_form.is_valid() and email_form.is_valid():\n person_form.save()\n phone_form.save()\n email_form.save()\n return redirect('contact-list')\n else:\n person_form = PersonModelForm(instance=contact)\n phone_form = PhoneModelForm(instance=contact_phone)\n email_form = EmailModelForm(instance=contact_email)\n context = {'person_form': person_form, 'phone_form': phone_form, 'email_form': email_form, 'id': id}\n return render(request, \"contacts/update.html\", context)\n\n\n# Delete contact\ndef contact_delete_view(request, id):\n contact = get_object_or_404(Person, pk=id)\n if request.method == 'POST':\n if Phone.objects.get(person=contact).phone or Email.objects.get(person=contact).email:\n return HttpResponse(\"

You cannot delete a person who has phone or email

\")\n else:\n contact.delete()\n return redirect('contact-list')\n return render(request, \"contacts/delete.html\")\n\ndef set_email(request, id):\n person = Person.objects.get(pk=id)\n if request.method == 'POST':\n email_form = EmailModelForm(request.POST or None)\n\n if email_form.is_valid():\n email_obj = Email.objects.get(person=person)\n email_obj.email = email_form.cleaned_data['email']\n email_obj.save()\n return redirect('contact-list')\n else:\n email_form = EmailModelForm()\n\n context = {'email_form': email_form, \"id\": id}\n return render(request, \"contacts/set_email.html\", context)\n\ndef set_phone(request, id):\n person = Person.objects.get(pk=id)\n if request.method == 'POST':\n phone_form = PhoneModelForm(request.POST or None)\n\n if phone_form.is_valid():\n phone_obj = Phone.objects.get(person=person)\n phone_obj.phone = phone_form.cleaned_data['phone']\n phone_obj.save()\n return redirect('contact-list')\n else:\n phone_form = PhoneModelForm()\n\n context = {'phone_form': phone_form, \"id\": id}\n return render(request, \"contacts/set_phone.html\", context)\n\ndef search_view(request):\n querry = request.GET['q']\n querryset = querry.split(\" \")\n print(querry)\n if len(querryset) > 1:\n if Person.objects.filter(first_name=querryset[0], last_name=querryset[1]):\n contacts = Person.objects.filter(first_name=querryset[0], last_name=querryset[1])\n for contact in contacts:\n try:\n contact.phone = contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n else:\n print(\"here!\")\n if Person.objects.filter(first_name=querry):\n contacts = Person.objects.filter(first_name=querry)\n for contact in contacts:\n try:\n contact.phone = contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n elif Person.objects.filter(last_name=querry):\n contacts = Person.objects.filter(last_name=querry)\n for contact in contacts:\n try:\n contact.phone = contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n elif Phone.objects.filter(phone=querry):\n phones = Phone.objects.filter(phone=querry)\n contacts = []\n for element in phones:\n contacts.append(element.person)\n for contact in contacts:\n try:\n contact.phone = contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n elif Email.objects.filter(email=querry):\n emails = Email.objects.filter(email=querry)\n contacts = []\n for element in emails:\n contacts.append(element.person)\n for contact in contacts:\n try:\n contact.phone = 
contact.phone_set.get(person=contact).phone\n if contact.phone == None: contact.phone = \"\"\n contact.email = contact.email_set.get(person=contact).email\n if contact.email == None: contact.email = \"\"\n except Phone.DoesNotExist:\n print(\"phone does not exist\")\n except Email.DoesNotExist:\n print(\"email does not exist\")\n else:\n contacts = []\n\n context = {\"contacts\": contacts}\n return render(request, \"contacts/list.html\", context)\n","sub_path":"phone_book/contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494323423","text":"import unittest\nfrom trello.boards import Boards\nfrom pymongo import MongoClient\n\n\nclass TestBoard(unittest.TestCase):\n\n def setUp(self):\n client = MongoClient()\n self.db = client.mongodb_course\n\n def tearDown(self):\n self.db.mongodb_course.drop()\n\n def test_create_board(self):\n board_name = 'Board Fake'\n boards = Boards(self.db)\n board = boards.create(board_name)\n self.assertEqual(board.name, 'Board Fake')\n board_model = self.db.boards.find_one({\"_id\": board.id})\n self.assertEqual(board_model[\"name\"], 'Board Fake')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644168177","text":"from discord.ext import commands\n#Member Join\n\nclass member_joinleave(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n # Member Join\n @client.event\n async def on_member_join(member):\n print(f\"{member}, what a mistake they made.\")\n await member.send(\"Welcome, this is a closed invite server. 
Use .start to begin\")\n\n # Member Leave\n @client.event\n async def on_member_remove(member):\n print(f\"{member} has left.\")\n\ndef setup(client):\n client.add_cog(member_joinleave(client))","sub_path":"cogs/member_joinleave.py","file_name":"member_joinleave.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204620055","text":"from algorithm.log_star_MIS import LogStar\nfrom network.network import Network\nfrom graph_tool.all import *\nfrom graph_generators.n_unit_disk_graph import getUnitDiskGraph\n\nn = 30\nadjacencyMatrix = getUnitDiskGraph(n)\n\n# tworzenie sieci\nnetwork = Network(adjacencyMatrix)\n\nalg = LogStar()\n\nnetwork.algorithm = alg\nalg.network = network\n\nnodes = network.ndList\n\nfor node in nodes:\n alg.initiateNode(node)\n\nnetwork.executeAlgorithm()\n\n# czesc rysujaca\ng = Graph(directed=False)\n\n# dodawanie wezlow - indeksowanie bedzie sie zgadzalo\nfor node in nodes:\n g.add_vertex()\n \n# dodawanie krawedzi - tu trzeba isc po macierzy incydencji\n# i - wiersze macierzy\n# j - kolumny macierzy\n# n - ilosc wezlow\n\nn = len(adjacencyMatrix)\n\nfor i in range(n):\n for j in range(i+1, n):\n counter = adjacencyMatrix[i][j]\n while counter > 0:\n g.add_edge(g.vertex(i), g.vertex(j))\n counter -=1\n \n# przypisywanie wlasciwosci\nmis = g.new_vertex_property(\"bool\") \n\nfor node in nodes:\n mis[g.vertex(node.ID)] = (node.memory['state'] == 'dominator')\n\npos = sfdp_layout(g, cooling_step=0.99)\ngraph_draw(g, output_size=(600, 600), vertex_text = g.vertex_index, vertex_color=mis,\n vertex_fill_color=mis, vertex_size=20, edge_pen_width=1.2,\n output=\"MIS_unitDisk.png\")\n","sub_path":"drawing/drawing_MIS_unitDisk.py","file_name":"drawing_MIS_unitDisk.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"301060439","text":"from django.urls import path, include\nfrom app01 import views\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('index/', views.IndexView.as_view(), name='index'),\n path('goods_list/', views.GoodsListView.as_view(), name='goods_list'),\n path('add_goods/', views.AddGoodsView.as_view(), name='add_goods'),\n path('goods_type/', views.GoodsTypeView.as_view(), name='goods_type'),\n path('comment/', views.CommentView.as_view(), name='comment'),\n path('inventory_management/', views.InventoryManagementView.as_view(), name='inventory_management'),\n path('order_list/', views.OrderListView.as_view(), name='order_list'),\n path('membership_list/', views.MemberShipList.as_view(), name='membership_list'),\n path('system_setting/', views.SystemSettingView.as_view(), name='system_setting'),\n path('message/', views.MessageView.as_view(), name='message'),\n]\n","sub_path":"app01/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429325819","text":"#\n#\tProject: \t\tc-home\n#\tModule: \t\tcdaCollect\n#\tDescription:\tcollect data via cmos sensor, detect and alert process\n#\t\t\t\t\tthe data is analyzed using background subtraction and Lucas_Kanade Optical Flow analysis\n#\t\t\t\t\tif the algorithm detects a fall an alert is triggered using a RESTful API in Goolge Cloud\n#\tLicense: \t\tGPLv3\n#\tVersion:\t\t1.1.1\n#\tDate Created:\t2/12/2019\n#\tDate Updated\t3/15/2019\n#\tRepo:\t\t\tc-home1/cdau\n# 
\tModified:\t\t2/12/2019 - added these comments \n#\nfrom cdaConfig import * \nimport cdaEventTrig\nimport io\nimport os\nimport sys\nimport time\nfrom time import sleep\nfrom datetime import datetime\nimport imutils\nimport cv2\nimport picamera\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport subprocess\nfrom threading import Thread\n\nclass Collect:\n\t#\tThe collect class: Streams and Records Video\n\t#\n\t#\t0. configures the sensor's (Sony IMX219PQ CMOS image sensor)settings\n\t#\t2. creates an array,rawCapture,for storing a frame which is a NumPy ndarray with the dims of the cam's resolution 1280x960)\n\t#\t3. creates a video stream using capture_continious\n\t#\t3. encodes the image as JPEG use_video_port=True\n\t#\t4. PiCameraCircularIO creates an in-memory ring buffer - seconds defines the buffer size it's not self evident\n\t#\t5. the camera begins recording with the video splitter_port=1 the default \n\t#\t6. a new thread is created to stream, do the actual video i/o \n\t\n\tdef __init__(self): \n\t\tself.camera = PiCamera()\n\t\tself.camera.resolution = (cam_wide, cam_hi)\n\t\tself.camera.framerate = cam_framerate\n\t\tself.camera.exposure_mode = cam_exposure_mode \n\t\tself.camera.shutter_speed = cam_shutter_speed \n\t\tself.camera.iso = cam_iso\n\t\t# creates the rawCapture 3D array object a NumPy ndarray (N-dimential array) with the dims of 1280 (cam_wide) by 960 (cam_hi) \n\t\tself.rawCapture = PiRGBArray(self.camera, size=self.camera.resolution)\n\t\t# capture_continious creates a network stream of 1280x960 NumPy ndarray objects encoded to be JPEG by use_video_port = True \n\t\tself.stream = self.camera.capture_continuous(self.rawCapture, format=\"bgr\", use_video_port=True)\n\t\tself.frame = None\n\t\tself.stopped = False\t# not used \n\t\tself.record = picamera.PiCameraCircularIO(self.camera, seconds=1, splitter_port=1)\n\t\tself.camera.start_recording(self.record, format='h264', splitter_port=1)\n\t\tself.camera.wait_recording(timeout=2, splitter_port=1)\n\t\tprint(\"2. Capture Continious ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\t\t\n\tdef start(self): \t# assigns the frame capture i/o to a thread\n\t\tt = Thread(target=self.update, args=()) # target update https://pymotw.com/2/threading/\n\t\tt.daemon = True\n\t\tt.start()\n\t\treturn self\n\t\t\n\tdef update(self):\n\t\tprint (\"3. 
Start Thread Loop ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\t\tfor f in self.stream:\t\t\t\t# grab all the bytes in the NumPy ndarray dims 1280x960\n\t\t\tself.frame = f.array \t\t\t# frame = ndarray \n\t\t\tself.rawCapture.truncate(0) \t# empties rawcapture ndarray\n\t\t\tif self.stopped:\t\t\t\t# this flag is never set to True\n\t\t\t\tprint (\"Stop Thread Loop ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\t\t\t\tself.stream.close()\n\t\t\t\tself.rawCapture.close()\n\t\t\t\tself.camera.close()\n\t\t\t\treturn\n\n\tdef read(self): # the return self.frame is the mechanism of passing the 1280x960 ndarray encoded as JPEG to detect for analysis\n\t\tself.camera.annotate_text = 'Recording ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\t\treturn self.frame\t\n\t\t\t\t\t\n\tdef trig(self):\n\t\t#print (\"Output Write Wait ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\t\tself.camera.annotate_text = 'Program Wait ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\t\tself.camera.wait_recording(timeout=5, splitter_port=1) # reduced wait because of logic in fall\n\t\tself.camera.annotate_text = 'Program Ended ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\t\t# indicate that the thread should be stopped\n\t\twith self.record.lock:\n\t\t# Find the first header frame in the video\n\t\t\tfor frame in self.record.frames:\n\t\t\t\tif frame.frame_type == picamera.PiVideoFrameType.sps_header:\n\t\t\t\t\tself.record.seek(frame.position)\n\t\t\t\t\tbreak\n\t\t\t# Write the rest of the stream to disk\n\t\t\tfile_name = \"cdaVideo\" + \".h264\"\n\t\t\twith io.open(file_name, 'wb') as output:\n\t\t\t\tprint (\"Output Write ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], file_name)\n\t\t\t\toutput.write(self.record.read())\n\t\t\t\toutput.close()\n\t\t\t\tcdaEventTrig.cdaEventTrig() # youtube upload and event trigger\n\t\treturn\n##----------------------------------------------------------------------------------------------- \n#\n#\n# \n##----------------------------------------------------------------------------------------------- \ndef detect():\n\t#\tThe detect method: Analyzes the input from the sensor and triggers the appropriate alert\n\t#\n\t#\t1. \tinitiates the collection process\n\t#\t2. \tloads initial frame into control variable image1\n\t#\t3.\tcreates an infinent loop that grabs new frames \n\t#\t4. \ta foreground mask is created to compare to determine if any event conditions are met\n\t#\t\ta.\tmovement\n\t#\t\tb.\tenter room\n\t#\t\tc. \texit room\n\t#\t\td. \tfall\n\t#\t\t* there are also analytics to determine if an intruder is entering, the older adult is wandering or if theres no movement \n\t\t\t\n\t# \tbaseline times\n\tstart_time = time.time()\n\tmotionTime = time.time()\n\tnoMotionTime = time.time()\n\t#\tthresh fall coordinates\n\tfall_lasty = 0\n\tfall_base = cam_hi\n\t#\tcounters\n\tfst_time = 0\n\tfall_cnt = 0\n\tfall_cntr = 0\n\tgone_cnt = 0\n\tgone_cntr = 0\n\tFPS = 0\n\tdropped_frames = 0\n\tframe_count = 0\n\tfall_frame = 0\n\tgone_frame = 0\n\tmotion_cntr = 0\n\t#\tconditions\n\tfall_flag = False\n\tstage_left = False\n\tstage_right = False\n\tfirst_image = False\n\tfirst_time = False\n\tmotionFirst = False\n\t###\n\t#\n\t###\n\tprint (\"1. Initializing Camera ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\tvs = Collect().start()\t\t\t\t \n\ttime.sleep(2) \n\tprint (\"4. 
Start Detect ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\timage2 = vs.read() \t\t\t\t\t\t\t\t\t\t# passes self.frame ndarray 1280x960 JPEG encoded\n\timage1 = image2 \n\tgrayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY) \t# convert image1 (control image) to gray scale (only done first time)\n\tstill_scanning = True\t\n\twhile still_scanning:\t# same as while True \t\n\t\timage2 = vs.read()\n\t\tframe_cntr = 0\n\t\tframe_count += 1\n\t\tduration = float(time.time() - start_time)\n\t\tfps = float(frame_count / duration)\n\t\t# Begin image processing \n\t\tbiggest_area = 5000 \t\t\t# 5000 is the number the contour area - w * h must exceed to qualify as motion BAH \t\n\t\tgrayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)\n\t\tdifferenceimage = cv2.absdiff(grayimage1, grayimage2)\n\t\tdifferenceimage = cv2.blur(differenceimage,(10,10)) \n\t\tretval, thresholdimage = cv2.threshold(differenceimage, 25, 255, cv2.THRESH_BINARY) # foreground mask\n\t\tthresholdimage, contours, hierarchy = cv2.findContours(thresholdimage, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\t\ttotal_contours = len(contours)\n\t\tgrayimage1 = grayimage2\n\t\tif total_contours == 0:\t\t# I don't know why we get blank frames\n\t\t\tdropped_frames += 1 \t\t\n\t\tif stage_left == True or stage_right == True: \n\t\t\tgone_cntr += 1\n\t\telse:\n\t\t\tgone_cntr = 0\n\t\tif fall_flag == True:\n\t\t\tfall_cntr += 1\n\t\telse:\n\t\t\tfall_cntr = 0\n\t\tcnx = 0 \n\t\tcny = 0 \n\t\tcox = 0 \n\t\tcoy = 0\n\t\tfall_y = 0\n\t\t# find contour with biggest area\n\t\tfor c in contours:\n\t\t\tframe_cntr += 1\n\t\t# get area of next contour\n\t\t\tfound_area = cv2.contourArea(c)\n\t\t\t(ax, ay, aw, ah) = cv2.boundingRect(c)\t\t# used for debugging\n\t\t#\n\t\t# Check --- 1: Hi x or Low x = enter or leave --- \n\t\t#\t\t\t2: Rapidly reducing y value = fall\n\t\t#\t\n\t\t\tif ax > cnx:\n\t\t\t\tcnx = ax # Left - left_x = ax\n\t\t\tif ay > cny:\n\t\t\t\tcny = ay # Low\n\t\t\t\tfall_y = ay\n\t\t\tif ax < cox or ax != 0:\n\t\t\t\tcox = ax # Right - right_x = ax\n\t\t\tif ay < coy or ay != 0:\n\t\t\t\tcoy = ay # Hi\n\t\t\tif found_area > biggest_area: \n\t\t\t\tbiggest_area = found_area\n\t\t\t\tif not motionFirst:\n\t\t\t\t\tprint (\"5. First Motion at ....\", datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], (\"Frame:%3i Counter:%3i Contours:%4i Area:%4i Left:%4i Right:%4i Hi:%4i Low:%4i Fall:%4i \") % (frame_count, frame_cntr, total_contours, found_area, cnx, cox, coy, fall_y, fall_base))\n\t\t\t\t\tfstMotionTime = time.time()\n\t\t\t\tmotionTime = time.time() \n\t\t\t\tmotionFirst = True\n\t\t\t#\n\t\t\t#\tAnalyze for fall !!!!!!!!!! 
optical flow \n\t\t\t#\n\t\t\t\tif fall_y >= fall_base and not stage_left and not stage_right:\n\t\t\t\t\tfall_flag = True\n\t\t\t\t\tfirst_time = True\n\t\t\t\t\tfall_frame = frame_count\n\t\t\t\t\tfall_cntr += 1\n\t\t\t\t\tfall_base = fall_y\n\t\t\t\t\tfnx = cnx \n\t\t\t\t\tfny = cny \n\t\t\t\t\tfox = cox \n\t\t\t\t\tfoy = coy\n\t\t\t\t\tfall_area = found_area\n\t\t\t\telse:\n\t\t\t\t\tif fall_flag == True:\t\t\t\t# motion no fall\n\t\t\t\t\t\tif frame_count >= fall_frame + 3: \n\t\t\t\t\t\t\tfall_flag = False\n\t\t\t\t\t\t\tfall_frame = 0\n\t\t\t\t\t\t\tfall_cntr + 1\n\t\t\t\t\t\t\tmotion_cntr += 1\n\t\t\t\t\t\t\tfall_cnt = 0\n\t\t\t\t\t\t\tfall_base = int(fall_y * 1.25)\n\t\t\t\t\t\t\tprint (\"Movement at ....\", datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], (\"Frame:%3i Counter:%3i Left:%4i Right:%4i Hi:%4i Low:%4i Fall:%4i Contours:%4i Area:%4i\") % (frame_count, frame_cntr, cnx, cox, coy, fall_y, fall_base, total_contours, found_area))\n\t\t\t\t\t\tfall_cntr + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmotion_cntr += 1\n\t\t\t\t\t\tfall_cntr = 0\n\t\t\t\t\t\tfall_cnt = 0\n\t\t\t\t\t\tfall_base = int(fall_y * 1.25)\n\t\t\t\t#\tleaving the room !!!!!! need to add occupency logic including counter and time of day\n\t\t\t\t#\t!!!!!!! motion tracking \n\t\t\t\tif cnx <= cam_wide *.05 and fall_flag == False:\t# Leaving the room\n\t\t\t\t\tstage_left = True\n\t\t\t\t\tfirst_time = True\n\t\t\t\t\tgone_frame = frame_count\n\t\t\t\t\tgone_cntr + 1\n\t\t\t\telse:\n\t\t\t\t\tif stage_left == True: \n\t\t\t\t\t\tif frame_count >= gone_frame + 3: \n\t\t\t\t\t\t\tstage_left = False\n\t\t\t\t\t\t\tgone_frame = 0\n\t\t\t\t\t\t\tgone_cntr + 1\n\t\t\t\t\t\t\tmotion_cntr += 1\n\t\t\t\t\t\t\tgone_cnt = 0\n\t\t\t\t\t\t\tprint (\"Movement at ....\", datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], (\"Frame:%3i Counter:%3i Left:%4i Right:%4i Hi:%4i Low:%4i Fall:%4i Contours:%4i Area:%4i\") % (frame_count, frame_cntr, cnx, cox, coy, fall_y, fall_base, total_contours, found_area))\n\t\t\t\t\t\tgone_cntr + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmotion_cntr += 1\n\t\t\t\t\t\tgone_cntr = 0\n\t\t\t\t\t\tgone_cnt = 0\n\t\t\t\tif cox >= cam_wide *.95 and fall_flag == False:\n\t\t\t\t\tstage_right = True\n\t\t\t\t\tfirst_time = True\n\t\t\t\t\tgone_frame = frame_count\n\t\t\t\t\tgone_cntr + 1\n\t\t\t\telse:\n\t\t\t\t\tif stage_right == True: \n\t\t\t\t\t\tif frame_count >= gone_frame + 3: \n\t\t\t\t\t\t\tstage_right = False\n\t\t\t\t\t\t\tgone_frame = 0\n\t\t\t\t\t\t\tgone_cntr + 1\n\t\t\t\t\t\t\tmotion_cntr += 1\n\t\t\t\t\t\t\tgone_cnt = 0\n\t\t\t\t\t\t\tprint (\"Movement at ....\", datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], (\"Frame:%3i Counter:%3i Left:%4i Right:%4i Hi:%4i Low:%4i Fall:%4i Contours:%4i Area:%4i\") % (frame_count, frame_cntr, cnx, cox, coy, fall_y, fall_base, total_contours, found_area))\n\t\t\t\t\t\tgone_cntr + 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmotion_cntr += 1\n\t\t\t\t\t\tgone_cntr = 0\n\t\t\t\t\t\tgone_cnt = 0\n\t\t\t# No Movement after fall or leaving room \n\t\t\telse:\n\t\t\t\tnoMotionTime = (time.time() - motionTime)\n\t\t\t\tif fall_flag == True and frame_count >= fall_frame +3: # Fall Logic \n\t\t\t\t\tif first_time == True:\n\t\t\t\t\t\tprint (\"Fall at ....\", datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], (\"Frame:%3i Counter:%3i Left:%4i Right:%4i Hi:%4i Low:%4i Fall:%4i Contours:%4i Area:%4i\") % (frame_count, frame_cntr, fnx, fox, foy, fny, fall_base, total_contours, fall_area))\n\t\t\t\t\t\tfirst_time = False\n\t\t\t\t\tif fall_cntr >= 6: # Number of Frames since fall / no 
\t\t\t\t\tif fall_cntr >= 6: # Number of Frames since fall / no movement\n\t\t\t\t\t\tfall_cntr = 0\n\t\t\t\t\t\tfall_cnt += 1\n\t\t\t\t\t\tif fall_cnt >= 5:\n\t\t\t\t\t\t\tfall_cnt = 0\n\t\t\t\t\t\t\tprint (\"Processing Fall ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4],(\"%.2f fps last %i frames %3i Motion Frames %3i Dropped Frames\" %( fps, frame_count, motion_cntr, dropped_frames)))\n\t\t\t\t\t\t\tvs.trig()\n\t\t\t\telse:\n\t\t\t\t\tif (stage_right or stage_left) and first_time == True: # Leaving Room\n\t\t\t\t\t\tprint (\"Left the room ....\", datetime.now().strftime(\"%H:%M:%S.%f\")[:-4], (\"Frame:%3i Counter:%3i Left:%4i Right:%4i Hi:%4i Low:%4i Fall:%4i Contours:%4i Area:%4i\") % (frame_count, frame_cntr, cnx, cox, coy, fall_y, fall_base, total_contours, found_area))\n\t\t\t\t\t\tfirst_time = False\n\t\t\t\t\tif gone_cntr >= 6:\n\t\t\t\t\t\tgone_cntr = 0\n\t\t\t\t\t\tgone_cnt += 1\n\t\t\t\t\t\tif gone_cnt >= 5:\n\t\t\t\t\t\t\tprint(\"Room Empty ....\",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4],(\"%.2f fps last %i frames %3i Motion Frames %3i Dropped Frames\" %( fps, frame_count, motion_cntr, dropped_frames)))\n\t\t\t\t\t\t\tgone_cnt = 0\n\t\t#\n\t\t#\tpython executes main at the bottom of the script - starts detect\n\t\t#\n\t\tif window_on:\n\t\t\tgrayimage2 = cv2.resize(grayimage2, (640, 480)) \n\t\t\t#cv2.drawContours(thresholdimage, contours, -1, (255, 255, 255), 4)\n\t\t\t#thresholdimage = cv2.resize(thresholdimage, (640, 480))\n\t\t\tcv2.imshow('Real Image (Press q in Window to Quit)', grayimage2)\n\t\t\t#cv2.imshow('Contours ', thresholdimage)\t\t# best for comparison \n\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\tcv2.destroyAllWindows()\n\t\t\t\tprint (\"Processing Stopped \",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4],(\"%.2f fps last %i frames %3i Motion Frames %3i Dropped Frames\" %( fps, frame_count, motion_cntr, dropped_frames)))\n\t\t\t\tprint (\"Elapsed Time No Motion \",datetime.now().strftime(\"%H:%M:%S.%f\")[:-4],(\"Elapsed Time: %3i \" %(noMotionTime))) \n\t\t\t\tstill_scanning = False\n\t\t\n#----------------------------------------------------------------------------------------------- \n\nif __name__ == '__main__':\n\ttry:\n\t\tdetect() \t# start \n\tfinally:\n\t\tprint(\"\")\n\t\tprint(\"Exiting\" ,datetime.now().strftime(\"%H:%M:%S.%f\")[:-4])\n\t\tprint(\"+++++++++++++++++++++++++++++++++++\")\n\t\tprint(\"\")\n\t\tos._exit(0)\n","sub_path":"cdaCollectBk.py","file_name":"cdaCollectBk.py","file_ext":"py","file_size_in_byte":13963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"158616629","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom matplotlib_examples.items import ExampleItem\nfrom scrapy.linkextractors import LinkExtractor\n\nclass ExampleSpiderSpider(scrapy.Spider):\n    '''\n    Crawl the .py example source files on matplotlib.org\n    '''\n    name = 'example_spider'\n    allowed_domains = ['matplotlib.org']\n    start_urls = ['https://matplotlib.org/examples/index.html']\n\n    def parse(self, response):\n        # extract links to the individual example pages\n        example_le = LinkExtractor(restrict_css='div.toctree-wrapper.compound', deny='/index.html$')\n        links = example_le.extract_links(response)\n        print(len(links))\n        for link in links:\n            # print(link)\n            yield scrapy.Request(link.url, callback=self.parse_example)\n            # break\n\n    def parse_example(self, response):\n        # get the source-code download URL\n        source_code_le = LinkExtractor(restrict_css='a.reference.external')\n        source_code_link = source_code_le.extract_links(response)[0]\n        # print(source_code_link)\n        source_code_url = source_code_link.url\n        # print(source_code_url)\n\n
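        # 'file_urls' is the field name consumed by Scrapy's FilesPipeline; the\n        # download itself only happens when ITEM_PIPELINES enables\n        # 'scrapy.pipelines.files.FilesPipeline' and FILES_STORE is configured.\n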
        example_item = ExampleItem()\n        example_item['file_urls'] = [source_code_url]    # note the format: it must be a list\n        return example_item\n\n\n\nif __name__ == '__main__':\n    from scrapy import cmdline\n    cmdline.execute(\"scrapy crawl example_spider\".split())","sub_path":"matplotlib_examples/matplotlib_examples/spiders/example_spider.py","file_name":"example_spider.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"37064075","text":"from typing import List\nimport string\n\n\ndef findAnagrams(self, s: str, p: str) -> List[int]:\n#         if len(s) < len(p): return []\n        \n#         p = sorted(p)\n        \n#         result = []\n#         i = 0\n        \n#         while i <= (len(s) - len(p)):\n#             value = sorted(s[i:i+len(p)])\n#             if value == p:\n#                 result.append(i)\n#             i += 1\n#         return result\n        \n        \n        if len(p) > len(s):\n            return []\n        \n        hash_table = {ch:idx for idx,ch in enumerate(string.ascii_lowercase)}\n        i = 0\n        anagram = [0] * 26\n        check = [0] * 26\n        result = []\n        \n        for ch in p:\n            anagram[hash_table[ch]] += 1\n        \n        \n        for ch in s[:len(p) - 1]:\n            check[hash_table[ch]] += 1\n        \n        # slide a window of length len(p) across s, updating counts incrementally\n        for i in range(len(p) - 1, len(s)):\n            check[hash_table[s[i]]] += 1               # take in the right edge\n            if check == anagram:\n                result.append(i - len(p) + 1)\n            check[hash_table[s[i - len(p) + 1]]] -= 1  # drop the left edge\n        return result\n","sub_path":"438_Find_All_Anagrams_in_a_String.py","file_name":"438_Find_All_Anagrams_in_a_String.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"76799937","text":"try:\r\n    total_cost = 1000000\r\n    annual_salary = float(input(\"Enter your annual salary:\"))\r\nexcept Exception:\r\n    print(\"Enter a valid decimal number\")\r\nelse:\r\n    portion_down_payment = 0.25*total_cost\r\n    semi_annual_raise = 0.07\r\n    r = 0.04\r\n\r\n    # Calculates the monthly increase and returns the new monthly salary\r\n    def salary_increment(monthly_pay, current_month, increase):\r\n        if current_month % 6 == 0:\r\n            monthly_pay = (monthly_pay * increase) + monthly_pay\r\n        return monthly_pay\r\n\r\n    def get_num_months(cost, salary, pct_saved, pct_increase):\r\n        savings = 0\r\n        months_saved = 0\r\n        monthly_salary = salary / 12\r\n        down_payment = 0.25*cost\r\n        while down_payment - 100 >= savings:\r\n            months_saved = months_saved + 1\r\n            monthly_salary = salary_increment(monthly_salary, months_saved, pct_increase)\r\n            savings = pct_saved * monthly_salary + savings + savings * (r / 12)\r\n#            print(\"Savings:\", savings)\r\n        return months_saved\r\n\r\n    months = 36\r\n    start = 0\r\n    end = 1.0000\r\n    steps = 0\r\n    portion_saved = (start + end)/2\r\n    computed_months = 0\r\n    try:\r\n        while computed_months != months:\r\n            steps = steps + 1\r\n            computed_months = get_num_months(total_cost, annual_salary, portion_saved, semi_annual_raise)\r\n            if computed_months > months:\r\n                start = portion_saved\r\n            else:\r\n                end = portion_saved\r\n            if abs(end - start) <= 0.01:\r\n                raise ValueError\r\n#            print(\"Start:\", start, \"End:\", end)\r\n            portion_saved = (start + end)/2\r\n        print(\"Best Savings Rate:\", portion_saved)\r\n        print(\"Steps in bisection search:\", steps)\r\n    except ValueError:\r\n        print(\"It is not possible to pay the down payment in three years.\")","sub_path":"Assignment 1C.py","file_name":"Assignment 1C.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"617253189","text":"from django.shortcuts import render\nfrom .models import Image, Video\nfrom .forms import ImageForm\n\n# Create your views here.\ndef home(request):\n    if request.method == 'POST':\n        # bind the submitted data and files to the form and save the new Image\n        form = ImageForm(request.POST, request.FILES)\n        if form.is_valid():\n            form.save()\n    form = ImageForm()\n    images = Image.objects.all()\n    videos = Video.objects.all()\n
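    # hand the template the blank upload form plus all stored media\n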
    context = {\n        'form': form,\n        'image': images,\n        'video': videos,\n    }\n    return render(request, 'index.html', context)\n    \n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"441959467","text":"#!/usr/bin/env python\n# Python CLI Cipher\n# -*- coding: utf-8 -*-\n# @Date    : 2016-07-01 22:23:37\n# @Author  : Xingfan Xia (xiax@carleton.edu)\n# @Link    : http://xiax.tech\n# @Version : $1.0\n\nimport random\nmessage = input(\"please enter the code you want to encrypt: \")\n\n\n#quit when the message is 'q'\nwhile message != 'q':\n    shift = input(\"enter the shift you want to use: \") \n    newm = \"\"\n\n    #for the bonus part, where we pick a random number between 1 and 25 (inclusive) \n    if shift == 'random':\n        shift = random.randint(1,25)\n    else:\n        shift = int(shift)\n    \n    \n    #we use this loop to encrypt each letter in the message.\n    for c in message: \n        \n        c = ord(c)\n        #we separated lower and upper cases in case the user uses a large shift \n        #for the lower case:\n        if 97<=c<=122:\n            if c+shift > 122:\n                c = 96 + (((c+shift)-122)%26) \n                #we used modulo here in case the user inputs a shift bigger than 26.\n                newm = newm + chr(c)\n            else:\n                c = chr(c+shift)\n                newm = newm + c\n        #for the upper case\n        elif 65<=c<=90:\n            if c+shift > 90:\n                c = 64 + (((c+shift)-90)%26)\n                newm = newm + chr(c)\n            else:\n                c = chr(c+shift)\n                newm = newm + c\n        #for things other than letters\n        else:\n            newm = newm + chr(c)\n    \n    \n    print (newm)\n    print ('a shift of', shift, 'was used')\n    #begin a new encryption. \n    message = input(\"enter the text to encrypt: \")\n    \n\nprint(\"done\")","sub_path":"Programming Class Resources/Projects for Students/Cipher_Done/sample answer.py","file_name":"sample answer.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"597663828","text":"from snuba.clickhouse.processors import QueryProcessor\nfrom snuba.clickhouse.query import Query\nfrom snuba.query.expressions import Expression, FunctionCall\n\nfrom snuba.request.request_settings import RequestSettings\n\n\nclass SliceOfMapOptimizer(QueryProcessor):\n    \"\"\"\n    Convert `arraySlice(arrayMap(...))` to `arrayMap(arraySlice(...))`. This is\n    a pattern often produced by UUIDArrayColumnProcessor.\n    \"\"\"\n\n    def process_query(self, query: Query, request_settings: RequestSettings) -> None:\n        query.transform_expressions(self._process_expressions)\n\n    def _process_expressions(self, exp: Expression) -> Expression:\n        if isinstance(exp, FunctionCall) and exp.function_name == \"arraySlice\":\n            inner_exp = exp.parameters[0]\n\n            if (\n                isinstance(inner_exp, FunctionCall)\n                and inner_exp.function_name == \"arrayMap\"\n            ):\n                lambda_fn = inner_exp.parameters[0]\n                innermost_exp = inner_exp.parameters[1]\n                slice_args = exp.parameters[1:]\n\n                return FunctionCall(\n                    exp.alias,\n                    \"arrayMap\",\n                    (\n                        lambda_fn,\n                        FunctionCall(\n                            None, \"arraySlice\", (innermost_exp,) + slice_args,\n                        ),\n                    ),\n                )\n\n        return exp\n","sub_path":"snuba/query/processors/slice_of_map_optimizer.py","file_name":"slice_of_map_optimizer.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"290858986","text":"\"\"\"\nThis file is a Python port of tf_ufp.ts from Google. Here is the original\ncopyright.\n\n/* Copyright 2016 Google Inc.
 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n\"\"\"\n\n\nimport numpy as np\nfrom typing import List, Callable, Any\nimport uuid\n\n\ndef testable() -> bool:\n    \"\"\"Bootstrap unit tests; remove eventually.\"\"\"\n    return True\n\n\n#  ___                 ___             _   _\n# | __|_ _ _ _ ___ _ _| __|_  _ _ _ __| |_(_)___ _ _\n# | _|| '_| '_/ _ \\ '_| _|| || | ' \\/ _|  _| / _ \\ ' \\\n# |___|_| |_| \\___/_| |_|  \\_,_|_||_\\__|\\__|_\\___/_||_|\n\n\ndef square_error(output: float, target: float) -> float:\n    e = output - target\n    result = 0.5 * e * e\n    return result\n\n\ndef square_error_der(output: float, target: float) -> float:\n    result = output - target\n    return result\n\n\nclass ErrorFunction(object):\n\n    def __init__(self,\n                 error: Callable[[float, float], float],\n                 der: Callable[[float, float], float]):\n        self.error = error\n        self.der = der\n\n\nclass SquareErrorFunction(ErrorFunction):\n\n    def __init__(self):\n        super().__init__(square_error, square_error_der)\n\n\n#    _  _   _ _          _   _          ___             _   _\n#   /_\\ __| |_(_)_ ____ _| |_(_)___ _ _ | __|_ _ _ _ __| |_(_)___ _ _\n#  / _ \\/ _|  _| \\ V / _` |  _| / _ \\ ' \\| _| || | ' \\/ _|  _| / _ \\ ' \\\n# /_/ \\_\\__|\\__|_|\\_/\\__,_|\\__|_\\___/_||_|_| \\_,_|_||_\\__|\\__|_\\___/_||_|\n\n\nclass ActivationFunction(object):\n\n    def __init__(self,\n                 output: Callable[[float], float],\n                 der: Callable[[float], float]):\n        self.output = output\n        self.der = der\n\n\ndef tanh(x: float) -> float:\n    result = np.tanh(x)\n    return result\n\n\ndef d_tanh(x: float) -> float:\n    t = np.tanh(x)\n    result = 1 - t * t\n    return result\n\n\nclass TanhActivationFunction(ActivationFunction):\n\n    def __init__(self):\n        super().__init__(tanh, d_tanh)\n\n\ndef relu(x: float) -> float:\n    result = np.max([0, x])\n    return result\n\n\ndef d_relu(x: float) -> float:\n    result = 0 if x <= 0 else 1\n    return result\n\n\nclass ReluActivationFunction(ActivationFunction):\n\n    def __init__(self):\n        super().__init__(relu, d_relu)\n\n\ndef sigmoid(x):\n    return 1.0 / (1.0 + np.exp(-x))\n\n\ndef d_sigmoid(x):\n    output = sigmoid(x)\n    result = output * (1 - output)\n    return result\n\n\nclass SigmoidActivationFunction(ActivationFunction):\n\n    def __init__(self):\n        super().__init__(sigmoid, d_sigmoid)\n\n\nclass LinearActivationFunction(ActivationFunction):\n\n    def __init__(self):\n        super().__init__(\n            output = lambda x: x,\n            der = lambda _: 1\n        )\n\n\n#  ___                _          _          _   _\n# | _ \\___ __ _ _  _| |__ _ _ _(_)_____ _| |_(_)___ _ _\n# |   / -_) _` | || | / _` | '_| |_ / _` |  _| / _ \\ ' \\\n# |_|_\\___\\__, |\\_,_|_\\__,_|_| |_/__\\__,_|\\__|_\\___/_||_|\n#         |___/\n\n\nclass RegularizationFunction(object):\n\n    def __init__(self,\n                 output: Callable[[float], float],\n                 der: Callable[[float], float]):\n        self.output = output\n        self.der = der\n\n\ndef l1(x: float) -> float:\n    result = np.abs(x)\n    return result\n\n\ndef d_l1(x: float) -> float:\n    if x < 0:\n        result = -1\n    elif x > 0:\n        result = 1\n    else:\n        result = 0\n    return result\n
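\n\n# Hypothetical sanity check, not part of the original tf_ufp.ts port: compare\n# each analytic derivative above against a central finite difference, staying\n# away from the kinks of relu and l1 at x = 0.\ndef _check_derivatives(eps: float = 1e-6) -> None:\n    for fn, dfn in ((tanh, d_tanh), (sigmoid, d_sigmoid), (relu, d_relu), (l1, d_l1)):\n        for x in (-2.0, -0.5, 0.5, 2.0):\n            numeric = (fn(x + eps) - fn(x - eps)) / (2 * eps)\n            assert abs(numeric - dfn(x)) < 1e-4, (fn.__name__, x)\n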
\n\nclass L1RegularizationFunction(RegularizationFunction):\n\n    def __init__(self):\n        super().__init__(l1, d_l1)\n\n\ndef l2(x: float) -> float:\n    result = 0.5 * x * x\n    return result\n\n\ndef d_l2(x: float) -> float:\n    return x\n\n\nclass L2RegularizationFunction(RegularizationFunction):\n\n    def __init__(self):\n        super().__init__(l2, d_l2)\n\n\n#  _  _         _\n# | \\| |___  __| |___\n# | .` / _ \\/ _` / -_)\n# |_|\\_\\___/\\__,_\\___|\n\n\nclass Node(object):\n    \"\"\" A node in a neural network. Each node has a state\n    (total input, output, and their respective derivatives),\n    which changes after every forward and back propagation run.\n    \"\"\"\n\n    def __init__(self,\n                 id: str,\n                 activation: ActivationFunction,\n                 init_zero_q: bool,\n                 ):\n        self.id = id\n        self.input_links = []\n        self.outputs = []\n        self.total_input = 0.0\n        self.output = 0.0\n        self.output_der = 0\n        self.input_der = 0\n        # Accumulated error derivative with respect to this node's total\n        # input since the last update. This derivative equals dE/db where b\n        # is the node's bias term.\n        self.acc_input_der = 0\n        # Number of accumulated err. derivatives with respect to the total\n        # input since the last update.\n        self.num_accumulated_ders = 0\n        # Activation function that takes total input and returns output\n        self.activation = activation\n        self.bias = 0 if init_zero_q else 0.1\n\n    def update_output(self) -> float:\n        \"\"\"Recompute and return node's output.\"\"\"\n        self.total_input = self.bias\n        # inner product / dot product:\n        for link in self.input_links:\n            self.total_input += link.weight * link.source.output\n        result = self.activation.output(self.total_input)\n        self.output = result\n        return result\n\n\n#  _    _      _\n# | |  (_)_ _ | |__\n# | |__| | ' \\| / /\n# |____|_|_||_|_\\_\\\n\n\nclass Link(object):\n    \"\"\"A link in a neural network. Each link has a weight and a source and\n    destination node. Also it has an internal state (error derivative\n    with respect to a particular input) which gets updated after\n    a run of back propagation (TODO and update_weights?).\"\"\"\n\n    def __init__(self,\n                 source: Node,\n                 dest: Node,\n                 regularization: RegularizationFunction,\n                 init_zero_q: bool,\n                 ):\n        self.id = source.id + '-' + dest.id\n        self.source = source\n        self.dest = dest\n        self.weight = 0 if init_zero_q else np.random.random() - 0.5\n        self.is_dead = False\n        # Error derivative with respect to this weight.\n        self.error_der = 0.0\n        # Accumulated error derivative since the last update.\n        self.acc_error_der = 0\n        # Number of accumulated derivatives since the last update.\n        self.num_accumulated_ders = 0\n        self.regularization = regularization\n\n\n#  ___      _ _    _   _  _     _                  _\n# | _ )_  _(_) |__| | | \\| |___| |___ __ _____ _ _| |__\n# | _ \\ || | | / _` | | .` / -_)  _\\ V  V / _ \\ '_| / /\n# |___/\\_,_|_|_\\__,_| |_|\\_\\___|\\__|\\_/\\_/\\___/_| |_\\_\\\n\n\ndef build_network(network_shape: List[int],\n                  activation: ActivationFunction,\n                  output_activation: ActivationFunction,\n                  regularization: RegularizationFunction,\n                  input_ids: List[str],\n                  init_zero_q: bool,  # in original; doesn't seem necessary\n                  ) -> List[List[Node]]:\n    \"\"\" Builds a neural network.\n
    @param networkShape The shape of the network. E.g. [1, 2, 3, 1] means\n    the network will have one input node, 2 nodes in first hidden layer,\n    3 nodes in second hidden layer and 1 output node.\n    @param activation The activation function of every hidden node.\n    @param outputActivation The activation function for the output nodes.\n    @param regularization The regularization function that computes a penalty\n    for a given weight (parameter) in the network. If None,\n    there will be no regularization.\n    @param inputIds List of ids for the input nodes. \"\"\"\n    num_layers = len(network_shape)\n    id = 1\n    # List of layers, with each layer a list of nodes.\n    network = []\n    for layer_idx in range(num_layers):\n        is_output_layer = layer_idx == num_layers - 1\n        is_input_layer = layer_idx == 0\n        current_layer = []\n        network.append(current_layer)\n        num_nodes = network_shape[layer_idx]\n        for i in range(num_nodes):\n            node_id = str(id)\n            if is_input_layer:\n                node_id = input_ids[i]\n            else:\n                id += 1\n            node = Node(\n                id=node_id,\n                activation=(\n                    output_activation if is_output_layer else activation),\n                init_zero_q=init_zero_q)\n            current_layer.append(node)\n            if layer_idx >= 1:\n                for j in range(len(network[layer_idx - 1])):\n                    prev_node = network[layer_idx - 1][j]\n                    link = Link(source=prev_node,\n                                dest=node,\n                                regularization=regularization,\n                                init_zero_q=init_zero_q)\n                    prev_node.outputs.append(link)\n                    node.input_links.append(link)\n    return network\n\n\n#  ___                                 _   _\n# | _ \\_ _ ___ _ __  __ _ __ _ __ _| |_(_)___ _ _\n# |  _/ '_/ _ \\ '_ \\/ _` / _` / _` |  _| / _ \\ ' \\\n# |_| |_| \\___/ .__/\\__,_\\__, \\__,_|\\__|_\\___/_||_|\n#             |_|        |___/\n\n\ndef forward_prop(network: List[List[Node]], inputs: List[float]) -> float:\n    \"\"\" Runs a forward propagation of the input through the\n    network. Modifies the internal state of the network - the\n    total input and output of each node in the network.\n\n    @param network The neural network.\n    @param inputs The input array. Its length must match the\n    number of input nodes in the network.\n    @return The final output of the network.\"\"\"\n    input_layer = network[0]\n    if len(inputs) != len(input_layer):\n        raise TypeError(f\"Number of inputs ({len(inputs)}) must match the \"\n                        f\"size of the input layer ({len(input_layer)})\")\n    # Let the outputs of the input layer of nodes be the inputs given to this\n    # forward-prop function.\n    for i in range(len(input_layer)):\n        node = input_layer[i]\n        node.output = inputs[i]\n    for layer_idx in range(1, len(network)):\n        current_layer = network[layer_idx]\n        for i in range(len(current_layer)):\n            node = current_layer[i]\n            node.update_output()  # memoize in the output\n    result = network[len(network) - 1][0].output\n    return result\n
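\n\n# Illustrative usage of the two functions above (hypothetical shapes and ids,\n# not from the original file):\n#\n#   net = build_network([2, 3, 1], TanhActivationFunction(),\n#                       LinearActivationFunction(), L2RegularizationFunction(),\n#                       input_ids=['x1', 'x2'], init_zero_q=False)\n#   y = forward_prop(net, [0.5, -0.2])\n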
\n\ndef back_prop(network: List[List[Node]],\n              target: float,\n              error_func: ErrorFunction):\n    \"\"\" Runs a backward propagation using the provided target and the\n    computed output of the previous call to forward propagation.\n    This method modifies the internal state of the network - the error\n    derivatives with respect to each node, and each weight\n    in the network.\"\"\"\n    output_node = network[len(network) - 1][0]\n    # The output node is a special case. Use the user-defined error\n    # function for the numerical value of the derivative.\n    output_node.output_der = error_func.der(output_node.output, target)\n    # Go through the layers backwards:\n    for layer_idx in range(len(network) - 1, 0, -1):  # skips layer 0\n        current_layer = network[layer_idx]\n        # TODO: Refactor as for layer in network[backward_slice_op]:\n        # Compute error derivative of each node with respect to:\n        # 1) its total input\n        # 2) each input weight\n        for i in range(len(current_layer)):  # vertically\n            node = current_layer[i]\n            # where did output_der get set up? Ten lines above.\n            node.input_der = node.output_der * node.activation.der(\n                node.total_input)  # chain rule?\n            node.acc_input_der += node.input_der  # linear combination of inputs\n            node.num_accumulated_ders += 1\n        # Error derivative with respect to each weight coming into the node.\n        for i in range(len(current_layer)):\n            node = current_layer[i]\n            for j in range(len(node.input_links)):\n                # TODO: Refactor as for link in node.input_links\n                link = node.input_links[j]\n                if link.is_dead:\n                    continue\n                link.error_der = node.input_der * link.source.output\n                link.acc_error_der += link.error_der\n                link.num_accumulated_ders += 1\n        if layer_idx == 1:\n            # Don't backprop into the input layer; it has no incoming weights\n            continue\n        prev_layer = network[layer_idx - 1]\n        for i in range(len(prev_layer)):\n            # TODO: Refactor as for node in prev_layer\n            node = prev_layer[i]\n            node.output_der = 0\n            for j in range(len(node.outputs)):\n                # TODO: Refactor as for output in node.outputs\n                output = node.outputs[j]\n                node.output_der += output.weight * output.dest.input_der\n\n\ndef update_weights(network: List[List[Node]],\n                   learning_rate: float,\n                   regularization_rate: float):\n    \"\"\"Updates the weights of the network using the previously accumulated\n    error derivatives.\"\"\"\n    for layer_idx in range(len(network)):\n        current_layer = network[layer_idx]\n        for i in range(len(current_layer)):\n            node = current_layer[i]\n            if node.num_accumulated_ders > 0:\n                node.bias -= \\\n                    learning_rate * node.acc_input_der / \\\n                    node.num_accumulated_ders\n                node.acc_input_der = 0\n                node.num_accumulated_ders = 0\n            for j in range(len(node.input_links)):\n                link = node.input_links[j]\n                if link.is_dead:\n                    continue\n                regul_der = link.regularization.der(link.weight) \\\n                    if link.regularization is not None else 0  # TODO: right?\n                # Update the weight based on dE/dw.\n                if link.num_accumulated_ders > 0:\n                    link.weight -= \\\n                        (learning_rate / link.num_accumulated_ders) * \\\n                        link.acc_error_der  # updated in 'backprop'\n                    new_link_weight = link.weight - \\\n                        (learning_rate * regularization_rate) * regul_der\n
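                    # l1's constant-slope gradient can step a small weight\n                    # straight past zero; the check below clamps it to 0 and\n                    # kills the link, which is how L1 produces sparse weights.\n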
                    if isinstance(link.regularization, L1RegularizationFunction) and \\\n                            link.weight * new_link_weight < 0:\n                        # The weight crossed 0 due to the regularization term. Set\n                        # it to 0 and kill the link.\n                        link.weight = 0\n                        link.is_dead = True\n                    else:\n                        link.weight = new_link_weight\n                link.acc_error_der = 0\n                link.num_accumulated_ders = 0\n\n\ndef for_each_node(network: List[List[Node]], ignore_inputs: bool,\n                  visitor: Callable[[Node], Any]):\n    rg = range(1 if ignore_inputs else 0, len(network))\n    # TODO: Refactor as a slice, for layer in slice of network\n    for layer_idx in rg:\n        current_layer = network[layer_idx]\n        for i in range(len(current_layer)):\n            # TODO: Refactor as for node in current_layer\n            node = current_layer[i]\n            visitor(node)\n\n\ndef get_output_node(network: List[List[Node]], which=0):\n    return network[-1][which]\n","sub_path":"zepto_tf/nn/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":15659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"472601775","text":"\"\"\"\r\n@Time : 2020/5/17\r\n@Author : KD_huhu\r\n@File : back_ground_qss\r\n@Description : \r\n\"\"\"\r\nimport sys\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\n\r\n\r\napp = QApplication(sys.argv)\r\nwin = QMainWindow()\r\nwin.setWindowTitle(\"Background image\")\r\nwin.resize(350,250)\r\nwin.setObjectName(\"MainWindow\")\r\npalette = QPalette()\r\n# dynamically change the window background image via QPalette\r\npalette.setBrush(QPalette.Background,QBrush(QPixmap(\"../images/python.jpg\")))\r\n# dynamically change the window background colour via QPalette\r\n# palette.setColor(QPalette.Background,Qt.red)\r\nwin.setPalette(palette)\r\nwin.show()\r\nsys.exit(app.exec())\r\n","sub_path":"p042/back_ground_qpalette.py","file_name":"back_ground_qpalette.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"517796373","text":"import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\ndef DataLoad():\n    path=os.path.join(\"sherwood.csv\")\n    df=pd.read_csv(path)\n    df=df.drop(['Id','Name','Type1', 'Type2', 'Path', 'DateCreated', 'DateModified', 'DateAccessed', 'Size', 'Deleted', 'DataAge', 'ARecency', 'MRecency', 'Unwritten', 'Unaccessed', 'Depth', 'Root', 'Data Asset'], axis=1)\n    return df\n\ndef BoxPlot(df): # plots the box plot of risk score grouped by Extension\n    df.boxplot(column=[\"Risk Score\"],by=\"Extension\")\n    plt.xlabel('Extensions')\n    plt.ylabel('Risk Scores')\n    plt.show()\n\ndef BarChart(df): # plots a bar chart of the median risk score per extension (top 10)\n    x=df.groupby([\"Extension\"],as_index=False)[\"Risk Score\"].median()\n    df=x.sort_values('Risk Score',ascending = False).head(10)\n    df.plot.bar(x='Extension', y='Risk Score',rot=20)\n    plt.xlabel('Extensions')\n    plt.ylabel('Median')\n    plt.show()\n    print(df)\n\ndef main():\n    df=DataLoad()\n    df=df.sort_values('Risk Score',ascending = False)\n    df=df.head(412)\n    print(df)\n\n    # c=0\n    # for i in df[\"Risk Score\"]:\n    #     if int(i) > 59:\n    #         c+=1\n    # print(c)\n\n    # BoxPlot(df)\n    # BarChart(df)\n    # x=df.groupby([\"Extension\"],as_index=False)[\"Risk Score\"].median()\n    # df=x.sort_values('Risk Score',ascending = False).head(10)\n    df.hist(column=\"Risk Score\", by=\"Extension\", range=[10, 85])#,bins=10)\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"ML_experiment/DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"361281174","text":"
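# Bob's side of an iterative key-reconciliation loop: Alice sends parity\n# bits, Bob corrects his key with Hamming blocks, drops and reports the bad\n# blocks, then both sides shuffle and repeat for ITERATIONS rounds.\n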
from modules.fileserver import FileServer\nfrom threading import Thread\nfrom time import sleep\nfrom os import _exit\nfrom modules.hamming import *\nfrom modules.config import *\nfrom modules.iohelper import update_file\nfrom modules.sender import send\nimport eventlet, socketio, os\n\nserver = FileServer(64296)\nREMOTE_HOST += \":64295\"\nsio = socketio.Server(async_handlers=True)\napp = socketio.WSGIApp(sio)\niteration = 0\n\n\ndef start_socket():\n    global app\n    eventlet.wsgi.server(eventlet.listen((\"\", 29083)), app)\n\n\n@sio.event\ndef connect(sid, environ):\n    print(\"connect \", sid)\n\n\n@sio.on(\"parity generated\")\ndef on_parity_generated(sid):\n    print(\"Alice generated parity\")\n    sio.emit(\"send parity\")\n\n\n@sio.on(\"parity sent\")\ndef on_parity_received(sid):\n    global LEN_NES\n    print(\"Parity received!\")\n    print(\"Generating bad blocks... \", end=\"\")\n    def cringe():\n        global LEN_NES\n        hamming_correct(\n            BOB_KEY, PARITY, TEMP, BAD_BLOCKS, POWER, len_nes=LEN_NES // 11, drop_bad=True\n        )\n        update_file(TEMP, BOB_KEY)\n        print(\"OK\\nShuffling key... \", end=\"\")\n        shuffle(BOB_KEY, TEMP, LEN_SHUFFLE, 0)\n        update_file(TEMP, BOB_KEY)\n        print(\"OK\\nSending badblocks... \", end=\"\")\n        send(REMOTE_HOST, BAD_BLOCKS)\n        print(\"OK\")\n        LEN_NES = 0\n        sio.emit(\"wipe badblocks\", LEN_NES)\n    Thread(target=cringe).start()\n\n\n@sio.on(\"blocks wiped\")\ndef on_blocks_wiped(sid):\n    print(\"Blocks wiped\")\n    sio.emit(\"shuffle key\")\n\n\n@sio.on(\"iteration ended\")\ndef on_next_iteration(sid):\n    global iteration\n    print(f'*** THE ITERATION {iteration + 1} of {ITERATIONS} ***')\n    if iteration == ITERATIONS:\n        print(\"Finished! Closing in 10 sec\")\n        sleep(10)\n        _exit(0)\n    print(\"Next iteration...\")\n    iteration += 1\n    sio.emit(\"generate parity\")\n\n\n@sio.on(\"hello\")\ndef hello(sid, data):\n    global iteration\n    print(f\"{sid}: {data}\")\n    sio.emit(\"hello\", \"Hello, Alice!\")\n    print(\"Alice is generating parity...\")\n    sio.emit(\"generate parity\")\n    iteration += 1\n\n\n@sio.event\ndef disconnect(sid):\n    print(\"disconnect \", sid)\n\n\ndef run():\n    global server\n    print(\"Running Bob...\")\n    Thread(target=start_socket).start()\n    Thread(target=server.start).start()\n","sub_path":"bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"347038010","text":"import csv\nimport json\nwith open(\"../../assets/static/airports.csv\") as f:\n    data = csv.reader(f,delimiter=',')\n    cnt = 0\n    data = list(data)\n    dic = {}\n    cid = {}\n    for i in range(len(data)):\n        if cnt == 0:\n            print((data[i]))\n            cnt = cnt + 1\n        if dic.get(data[i][1]) is None:\n            dic[data[i][1]] = {\"country\":data[i][8]}\n        if cid.get(data[i][8]) is None:\n            cid[data[i][8]] = []\n        cid[data[i][8]].append(data[i][1])\n\n    with open(\"../../assets/static/airport_to_country.json\",\"w\") as p:\n        json.dump(dic,p,indent=4,sort_keys=True)\n\n    with open(\"../../assets/static/country_to_airport.json\",\"w\") as q:\n        json.dump(cid,q,indent=4,sort_keys=True)\n","sub_path":"data/scripts/bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"124345490","text":"import asynctest\nimport asynctest.mock as mock\n\nfrom opsdroid.memory import Memory\n\n\nclass TestMemory(asynctest.TestCase):\n    \"\"\"Test the opsdroid memory class.\"\"\"\n\n    def setup(self):\n        return Memory()\n\n    async def test_memory(self):\n        memory = self.setup()\n        data = \"Hello world!\"\n        await memory.put(\"test\", data)\n
memory.get(\"test\"))\n self.assertIsNone(await memory.get(\"nonexistant\"))\n\n async def test_empty_memory(self):\n memory = self.setup()\n self.assertEqual(None, await memory.get(\"test\"))\n\n async def test_database_callouts(self):\n memory = self.setup()\n memory.databases = [mock.MagicMock()]\n memory.databases[0].get = mock.CoroutineMock()\n memory.databases[0].put = mock.CoroutineMock()\n data = \"Hello world!\"\n\n await memory.put(\"test\", data)\n self.assertTrue(memory.databases[0].put.called)\n\n memory.databases[0].reset_mock()\n\n await memory.get(\"test\")\n self.assertTrue(memory.databases[0].get.called)\n","sub_path":"tests/test_memory.py","file_name":"test_memory.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616727927","text":"\"\"\"\nSTN implementation for the PHOCNet.\nThis approach was not followed through and discussed in my thesis.\nHence good results, using this STN are not guaranteed.\n\n.. moduleauthor:: Maximilian Springenberg \n\n|\n\n\"\"\"\nfrom torch import nn\nimport torch.nn.functional as F\nfrom src.nn.pp import GPP, PPTypes, PPTypePooling\n\n\nclass STN(nn.Module):\n \"\"\"\n A simple STN implementation, that can be used as an initial layer in :class:`src.nn.phocnet.STNPHOCNet`.\n\n For more information on STNs have a look at Max Jaderbergs\n `paper `__.\n \"\"\"\n\n def __init__(self, input_channels=1):\n super().__init__()\n # convolutional layers\n kernel_size_conv = 3\n padding_conv = 1\n stride_conv = 1\n self.loc_c1 = nn.Conv2d(in_channels=input_channels, out_channels=16,\n kernel_size=kernel_size_conv, padding=padding_conv, stride=stride_conv)\n self.loc_c2 = nn.Conv2d(in_channels=self.loc_c1.out_channels, out_channels=32,\n kernel_size=kernel_size_conv, padding=padding_conv, stride=stride_conv)\n self.loc_c3 = nn.Conv2d(in_channels=self.loc_c2.out_channels, out_channels=32,\n kernel_size=kernel_size_conv, padding=padding_conv, stride=stride_conv)\n # set-up pooling layers\n self.padding_pooling = 0\n self.kernel_pooling = 2\n self.stride_pooling = 2\n # spatial pooling layer max of 1 + 4 + 16 + 64 + 256 = 341 bins per feature map => 341 * 32 = 10912 out_vector\n self.loc_spp = GPP(gpp_type=PPTypes.T_SPP, levels=5,\n pool_type=PPTypePooling.MAX_POOL, n_f_maps=self.loc_c3.out_channels)\n # regression\n self.loc_lin1 = nn.Linear(self.loc_spp.pooling_output_size, 1024)\n self.loc_out = nn.Linear(self.loc_lin1.out_features, 3*2)\n\n def forward(self, U):\n \"\"\"\n Forward pass of this STN, i.e. 
transformation of the input image/ map U.\n\n :param U: feature map/ image U\n :return: transformed image\n \"\"\"\n theta = self.f_loc(U)\n theta = theta.view(-1, 2, 3)\n sampling_grid = self.T_theta(theta, U.size())\n V = self.sampler(feature_map=U, sampling_grid=sampling_grid)\n return V\n\n def f_loc(self, U):\n \"\"\"\n The localisation network\n\n :param U: feature map/ image U\n :return: parameters :math:`\\\\Theta` for the grid generator\n \"\"\"\n # convolution\n theta = F.relu(self.loc_c1(U))\n theta = self.pool(theta)\n theta = F.relu(self.loc_c2(theta))\n theta = self.pool(theta)\n theta = F.relu(self.loc_c3(theta))\n # pyramidal pooling\n theta = self.loc_spp(theta)\n # regression values\n theta = F.relu(self.loc_lin1(theta))\n theta = F.relu(self.loc_out(theta))\n return theta\n\n def pool(self, x_in):\n try:\n # pooling\n x_out = F.max_pool2d(x_in, kernel_size=self.kernel_pooling, stride=self.stride_pooling,\n padding=self.padding_pooling)\n except RuntimeError as rte:\n return x_in\n return x_out\n\n def T_theta(self, theta, size):\n \"\"\"\n The grid generator, applied to the regular spatial grid\n\n :param theta: parameters of the grid generator (usually provided by the localisation network)\n :param size: size of the input feature-map/ image\n :return: generated sampling grid\n \"\"\"\n sampling_grid = F.affine_grid(theta, size)\n return sampling_grid\n\n def sampler(self, feature_map, sampling_grid):\n \"\"\"\n The sampler\n\n :param feature_map: input feature-map/ image\n :param sampling_grid: sampling grid, used for warping\n :return: warped feature-map/ image\n \"\"\"\n V = F.grid_sample(feature_map, sampling_grid)\n return V\n\n def setup(self):\n return {'c_in': self.loc_c1.in_channels}\n","sub_path":"src/nn/stn.py","file_name":"stn.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652002246","text":"import re\nimport types\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom itertools import chain\n\nimport pandas as pd\nfrom IPython.core.display import display\nfrom bw2data.backends.peewee.utils import dict_as_exchangedataset\nfrom sympy import symbols\n\nfrom .base_utils import *\nfrom .base_utils import _getDb, _eprint, _actDesc, _getAmountOrFormula, _actName\nfrom .params import *\nfrom .params import _param_registry, _completeParamValues\n\n\nBIOSPHERE3_DB_NAME = 'biosphere3'\n_USER_DB = None\n\ndef USER_DB() :\n if _USER_DB is not None :\n return _USER_DB\n else:\n # Try to guess it\n candidates = list(key for key in bw.databases.keys() if key not in [ECOINVENT_DB_NAME(), BIOSPHERE3_DB_NAME])\n if len(candidates) == 1 :\n return candidates[0]\n\n raise Exception(\"Unable to guess the name of the user DB. 
Please set it manually with SET_USER_DB('YourDbName')\")\n\ndef SET_USER_DB(userDbName) :\n    global _USER_DB\n    _USER_DB = userDbName\n\ndef ECOINVENT_DB_NAME() :\n    \"\"\"Return the name of the ecoinvent DB\"\"\"\n    for key in bw.databases.keys() :\n        if 'ecoinvent' in key :\n            return key\n\n    raise Exception(\"No ecoinvent DB found\")\n\nold_amount = symbols(\"old_amount\")  # Can be used in expression of amount for updateExchanges, in order to reference the previous value\nNumOrExpression = Union[float, Basic]\n\n\n\n\nclass ActivityExtended(Activity):\n    \"\"\"Improved API for activity : adding a few useful methods.\n    Those methods are backported to #Activity in order to be directly available on all existing instances\n    \"\"\"\n\n    def getExchange(self, name=None, input=None, single=True):\n        \"\"\"Get exchange by name or input\n\n        Parameters\n        ----------\n        name : name of the exchange. Name can be suffixed with '#LOCATION' to distinguish several exchanges with same name. \\\n            It can also be suffixed by '*' to match any exchange whose name contains it. Location can be a negative match '!'\n            Example : \"Wood*#!RoW\" matches any exchange with name containing Wood, and location not \"RoW\"\n\n        single : True if a single match is expected. Otherwise, a list of results is returned\n\n        Returns\n        -------\n        Single exchange or list of exchanges (if single is False or \"name\" contains a '*');\n        raises Exception if no matching exchange is found\n        \"\"\"\n\n        def single_match(name, exch):\n\n            # Name can be \"Electricity#RER\"\n            if \"#\" in name:\n                name, loc = name.split(\"#\")\n                negative = False\n                if loc.startswith(\"!\"):\n                    negative = True\n                    loc = loc[1:]\n                act = getActByCode(*exch['input'])\n\n                if not 'location' in act or (negative and act['location'] == loc) or (\n                        not negative and act['location'] != loc):\n                    return False\n\n            if '*' in name:\n                name = name.replace('*', '')\n                return name in exch['name']\n            else:\n                return name == exch['name']\n\n        def match(exch):\n            if name:\n                if isinstance(name, list):\n                    return any(single_match(iname, exch) for iname in name)\n                else:\n                    return single_match(name, exch)\n\n            if input:\n                return input == exch['input']\n\n        exchs = list(exch for exch in self.exchangesNp() if match(exch))\n        if len(exchs) == 0:\n            raise Exception(\"Found no exchange matching name : %s\" % name)\n\n        if single and len(exchs) != 1:\n            raise Exception(\"Expected 1 exchange with name '%s' found %d\" % (name, len(exchs)))\n        if single:\n            return exchs[0]\n        else:\n            return exchs\n
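\n    # Illustrative selector patterns (hypothetical activity and names):\n    #   act.getExchange(\"Electricity#RER\")  -> exact name, located in \"RER\"\n    #   act.getExchange(\"Wood*#!RoW\")       -> name contains \"Wood\", any location but \"RoW\"\n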
\n    def setOutputAmount(self, amount):\n        '''Set the amount for the single output exchange (1 by default)'''\n        self.addExchanges({self: amount})\n\n    def updateExchanges(self, updates: Dict[str, any] = dict()):\n        \"\"\"Update existing exchanges, by name.\n\n        Parameters\n        ----------\n        updates : Dict of \"<exchange name>\" => <new value>\n\n            <exchange name> can be suffixed with '#LOCATION' to distinguish several exchanges with same name. \\\n            It can also be suffixed by '*' to match any exchange whose name contains it. Location can be a negative match '!'\n            Example : \"Wood*#!RoW\" matches any exchange with name containing Wood, and location not \"RoW\"\n\n            <new value> : either single value (float or SymPy expression) for updating only amount, or activity for updating only input,\n            or dict of attributes, for updating both at once, or any other attribute.\n            The amount can reference the symbol 'old_amount' that will be replaced with the current amount of the exchange.\n        \"\"\"\n\n        # Update exchanges\n        for name, attrs in updates.items():\n\n            exchs = self.getExchange(name, single=not '*' in name)\n            if not isinstance(exchs, list):\n                exchs = [exchs]\n            for exch in exchs:\n\n                if attrs is None:\n                    exch.delete()\n                    exch.save()\n                    continue\n\n                # Single value ? => amount\n                if not isinstance(attrs, dict):\n                    if isinstance(attrs, Activity):\n                        attrs = dict(input=attrs)\n                    else:\n                        attrs = dict(amount=attrs)\n\n                if 'amount' in attrs:\n                    attrs.update(_amountToFormula(attrs['amount'], exch['amount']))\n\n                exch.update(attrs)\n                exch.save()\n\n                # We have a formula now ? => register it to parametrized exchange\n                if 'formula' in attrs:\n                    bw.parameters.add_exchanges_to_group(DEFAULT_PARAM_GROUP, self)\n\n    def deleteExchanges(self, name, single=True):\n        ''' Remove matching exchanges '''\n        exchs = self.getExchange(name, single=single)\n        if not isinstance(exchs, list):\n            exchs = [exchs]\n        if len(exchs) == 0:\n            raise Exception(\"No exchange found for '%s'\" % name)\n        for ex in exchs:\n            ex.delete()\n            ex.save()\n        self.save()\n\n    def substituteWithDefault(self, exchange_name: str, switch_act: Activity, paramSwitch: EnumParam, amount=None):\n\n        \"\"\"Substitutes one exchange with a switch on other activities, or fallback to the current one as default (parameter set to None)\n        For this purpose, we create a new exchange referencing the activity switch, and we multiply current activity by '_default',\n        making it null as soon as one enum value is set.\n\n        This is useful for changing electricity mix, leaving the default one if needed\n\n        Parameters\n        ----------\n        act : Activity to update\n        exchange_name : Name of the exchange to update\n        switch_act : Activity to substitute as input\n        amount : Amount of the input (uses previous amount by default)\n        \"\"\"\n\n        current_exch = self.getExchange(exchange_name)\n\n        prev_amount = amount if amount else _getAmountOrFormula(current_exch)\n\n        self.addExchanges({switch_act: prev_amount})\n        self.updateExchanges({exchange_name: paramSwitch.symbol(None) * prev_amount})\n\n    def addExchanges(self, exchanges: Dict[Activity, Union[NumOrExpression, dict]] = dict()):\n        \"\"\"Add exchanges to an existing activity, with a compact syntax :\n\n        Parameters\n        ----------\n        exchanges : Dict of activity => amount or activity => attributes_dict. 
\\\n Amount being either a fixed value or Sympy expression (arithmetic expression of Sympy symbols)\n \"\"\"\n parametrized = False\n for sub_act, attrs in exchanges.items():\n\n if isinstance(attrs, dict):\n amount = attrs.pop('amount')\n else:\n amount = attrs\n attrs = dict()\n\n exch = self.new_exchange(\n input=sub_act.key,\n name=sub_act['name'],\n unit=sub_act['unit'] if 'unit' in sub_act else None,\n type='production' if self == sub_act else 'biosphere' if sub_act[\n 'database'] == BIOSPHERE3_DB_NAME else 'technosphere')\n\n exch.update(attrs)\n exch.update(_amountToFormula(amount))\n if 'formula' in exch:\n parametrized = True\n\n exch.save()\n self.save()\n if parametrized:\n bw.parameters.add_exchanges_to_group(DEFAULT_PARAM_GROUP, self)\n\n def getAmount(self, *args, sum=False, **kargs):\n \"\"\"\n Get the amount of one or several exchanges, selected by name or input. See #getExchange()\n \"\"\"\n exchs = self.getExchange(*args, single=not sum, **kargs)\n if sum:\n res = 0\n if len(exchs) == 0:\n raise Exception(\"No exchange found\")\n for exch in exchs:\n res += _getAmountOrFormula(exch)\n return res\n else:\n return _getAmountOrFormula(exchs)\n\n def exchangesNp(self):\n \"\"\" List of exchange, except production (output) one.\"\"\"\n for exch in self.exchanges():\n if exch['input'] != exch['output']:\n yield exch\n\n\n# Backport new methods to vanilla Activity class in order to benefit from it for all existing instances\nfor name, item in ActivityExtended.__dict__.items():\n if isinstance(item, types.FunctionType):\n setattr(Activity, name, item)\n\n\ndef _split_words(name):\n clean = re.sub('[^0-9a-zA-Z]+', ' ', name)\n clean = re.sub(' +', ' ', clean)\n clean = clean.lower()\n\n return clean.split(' ')\n\n\ndef _build_index(db):\n res = defaultdict(set)\n for act in db:\n words = _split_words(act['name'])\n for word in words:\n res[word].add(act)\n return res\n\n\n# Index of activities per name, for fast search dict[db_name][activity_word] => list of activitites\ndb_index = dict()\n\n\ndef _get_indexed_db(db_name):\n if not db_name in db_index:\n db_index[db_name] = _build_index(_getDb(db_name))\n return db_index[db_name]\n\n\ndef _find_candidates(db_name, name):\n res = []\n index = _get_indexed_db(db_name)\n words = _split_words(name)\n for word in words:\n candidates = index[word]\n if len(res) == 0 or (0 < len(candidates) < len(res)):\n res = list(candidates)\n return res\n\n\ndef getActByCode(db_name, code):\n \"\"\" Get activity by code \"\"\"\n return _getDb(db_name).get(code)\n\n\ndef findActivity(name=None, loc=None, in_name=None, code=None, categories=None, category=None, db_name=None,\n single=True, unit=None):\n \"\"\"\n Find single activity by name & location\n Uses index for fast fetching\n \"\"\"\n\n if name and '*' in name:\n in_name = name.replace(\"*\", \"\")\n name = None\n\n def act_filter(act):\n if name and not name == act['name']:\n return False\n if in_name and not in_name in act['name']:\n return False\n if loc and not loc == act['location']:\n return False\n if unit and not unit == act['unit']:\n return False\n if category and not category in act['categories']:\n return False\n if categories and not tuple(categories) == act['categories']:\n return False\n return True\n\n if code:\n acts = [getActByCode(db_name, code)]\n else:\n search = name if name else in_name\n\n search = search.lower()\n search = search.replace(',', ' ')\n search = re.sub('\\w*[^a-zA-Z ]+\\w*', ' ', search)\n\n # Find candidates via index\n # candidates = 
_find_candidates(db_name, name_key)\n candidates = _getDb(db_name).search(search, limit=200)\n\n # print(candidates)\n\n # Exact match\n acts = list(filter(act_filter, candidates))\n\n if single and len(acts) == 0:\n raise Exception(\"No activity found in '%s' with name '%s' and location '%s'\" % (db_name, name, loc))\n if single and len(acts) > 1:\n raise Exception(\"Several activity found in '%s' with name '%s' and location '%s':\\n%s\" % (\n db_name, name, loc, str(acts)))\n if len(acts) == 1:\n return acts[0]\n else:\n return acts\n\n\ndef findBioAct(name=None, loc=None, **kwargs):\n \"\"\"Alias for findActivity(name, ... db_name=BIOSPHERE3_DB_NAME)\n \"\"\"\n return findActivity(name=name, loc=loc, db_name=BIOSPHERE3_DB_NAME, **kwargs)\n\n\ndef findTechAct(name=None, loc=None, **kwargs):\n \"\"\"Alias for findActivity(name, ... db_name=ECOINVENT_DB_NAME)\n \"\"\"\n return findActivity(name=name, loc=loc, db_name=ECOINVENT_DB_NAME(), **kwargs)\n\n\ndef _amountToFormula(amount: Union[float, str, Basic], currentAmount=None):\n \"\"\"Transform amount in exchange to either simple amount or formula\"\"\"\n res = dict()\n if isinstance(amount, Basic):\n\n if currentAmount != None:\n amount = amount.subs(old_amount, currentAmount)\n\n # Check the expression does not reference undefined params\n all_symbols = list([key for param in _param_registry().values() for key, val in param.expandParams().items()])\n for symbol in amount.free_symbols:\n if not str(symbol) in all_symbols:\n raise Exception(\"Symbol '%s' not found in params : %s\" % (symbol, all_symbols))\n\n res['formula'] = str(amount)\n res['amount'] = 0\n elif isinstance(amount, float) or isinstance(amount, int):\n res['amount'] = amount\n else:\n raise Exception(\n \"Amount should be either a constant number or a Sympy expression (expression of ParamDef). Was : %s\" % type(\n amount))\n return res\n\n\ndef _newAct(db_name, code):\n db = _getDb(db_name)\n # Already present : delete it ?\n for act in db:\n if act['code'] == code:\n _eprint(\"Activity '%s' was already in '%s'. Overwriting it\" % (code, db_name))\n act.delete()\n\n return db.new_activity(code)\n\n\ndef newActivity(db_name, name, unit,\n exchanges: Dict[Activity, Union[float, str]] = dict(),\n code=None,\n **argv):\n \"\"\"Creates a new activity\n\n Parameters\n ----------\n name : Name ofthe new activity\n db_name : Destination DB : ACV DB by default\n exchanges : Dict of activity => amount. 
If amount is a string, is it considered as a formula with parameters\n argv : extra params passed as properties of the new activity\n \"\"\"\n act = _newAct(db_name, code if code else name)\n act['name'] = name\n act['type'] = 'process'\n act['unit'] = unit\n act.update(argv)\n\n # Add exchanges\n act.addExchanges(exchanges)\n\n return act\n\n\ndef copyActivity(db_name, activity: ActivityExtended, code=None, withExchanges=True, **kwargs) -> ActivityExtended:\n \"\"\"Copy activity into a new DB\"\"\"\n\n res = _newAct(db_name, code)\n\n for key, value in activity.items():\n if key not in ['database', 'code']:\n res[key] = value\n for k, v in kwargs.items():\n res._data[k] = v\n res._data[u'code'] = code\n res['name'] = code\n res.save()\n\n if withExchanges:\n for exc in activity.exchanges():\n data = deepcopy(exc._data)\n data['output'] = res.key\n # Change `input` for production exchanges\n if exc['input'] == exc['output']:\n data['input'] = res.key\n ExchangeDataset.create(**dict_as_exchangedataset(data))\n\n return res\n\n\ndef newSwitchAct(dbname, name, paramDef: ParamDef, acts_dict: Dict[str, Activity]):\n \"\"\"Create a new parametrized, virtual activity, made of a map of other activities, controlled by an enum parameter.\n This enables to implement a \"Switch\" with brightway parameters\n Internally, this will create a linear sum of other activities controlled by _ : 0 or 1\n\n By default, all activities have associated amount of 1.\n You can provide other amounts by providing a tuple of (activity, amount).\n\n Parameters\n ----------\n dbname: name of the target DB\n name: Name of the new activity\n paramDef : parameter definition of type enum\n acts_dict : dict of \"enumValue\" => activity or \"enumValue\" => (activity, amount)\n\n Examples\n --------\n\n >>> newSwitchAct(MYDB, \"switchAct\", switchParam, {\n >>> \"val1\" : act1 # Amount is 1\n >>> \"val2\" : (act2, 0.4) # Different amount\n >>> \"val3\" : (act3, b + 6) # Amount with formula\n >>> }\n \"\"\"\n\n # Transform map of enum values to corresponding formulas _\n exch = dict()\n for key, act in acts_dict.items() :\n amount = 1\n if type(act) == list or type(act) == tuple :\n act, amount = act\n exch[act] = amount * paramDef.symbol(key)\n\n res = newActivity(\n dbname,\n name,\n unit=list(acts_dict.values())[0]['unit'],\n exchanges=exch)\n\n return res\n\n\ndef printAct(*args, **params):\n \"\"\"\n Print activities and their exchanges.\n If parameter values are provided, formulas will be evaluated accordingly\n \"\"\"\n tables = []\n names = []\n\n activities = args\n\n for act in activities:\n inputs_by_ex_name = dict()\n df = pd.DataFrame(index=['input', 'amount', 'unit'])\n data = dict()\n for (i, exc) in enumerate(act.exchanges()):\n\n if exc['type'] == 'production' :\n continue\n\n input = bw.get_activity(exc.input.key)\n amount = _getAmountOrFormula(exc)\n\n # Params provided ? 
Evaluate formulas\n if len(params) > 0 and isinstance(amount, Basic):\n new_params = [(name, value) for name, value in _completeParamValues(params).items()]\n amount = amount.subs(new_params)\n\n ex_name = exc['name']\n #if 'location' in input and input['location'] != \"GLO\":\n # name += \"#%s\" % input['location']\n #if exc.input.key[0] not in [BIOSPHERE3_DB_NAME, ECOINVENT_DB_NAME()]:\n # name += \" {user-db}\"\n\n # Unique name : some exchanges may havve same names\n _name = ex_name\n i = 1\n while ex_name in data:\n ex_name = \"%s#%d\" % (_name, i)\n i += 1\n\n inputs_by_ex_name[ex_name] = input\n\n input_name = _actName(input)\n if input.key[0] == USER_DB():\n input_name += \"{user-db}\"\n\n data[ex_name] = [input_name, amount, exc.unit]\n\n # Provide impact calculation if impact provided\n\n\n for key, values in data.items():\n df[key] = values\n\n tables.append(df.T)\n names.append(_actDesc(act))\n\n full = pd.concat(tables, axis=1, keys=names, sort=True)\n\n # Highlight differences in case two activites are provided\n if len(activities) == 2:\n yellow = \"background-color:yellow\"\n iamount1 = full.columns.get_loc((names[0], \"amount\"))\n iamount2 = full.columns.get_loc((names[1], \"amount\"))\n iact1 = full.columns.get_loc((names[0], \"input\"))\n iact2 = full.columns.get_loc((names[1], \"input\"))\n\n def same_amount(row):\n res = [\"\"] * len(row)\n\n if row[iamount1] != row[iamount2]:\n res[iamount1] = yellow\n res[iamount2] = yellow\n if row[iact1] != row[iact2]:\n res[iact1] = yellow\n res[iact2] = yellow\n return res\n\n full = full.style.apply(same_amount, axis=1)\n\n display(full)\n\n\ndef newInterpolatedAct(dbname: str, name: str, act1: ActivityExtended, act2: ActivityExtended, x1, x2, x, alpha1=1,\n alpha2=1, **kwargs):\n \"\"\"Creates a new activity made of interpolation of two similar activities.\n For each exchange :\n amount = alpha1 * a1 + (x - X1) * (alpha2 * a2 - alpha1 * a1) / (x2 - x1)\n\n Parameters\n ----------\n name : Name of new activity\n act1 : Activity 1\n act2 : Activity 2\n x1 : X for act1\n x2 : X for act 2\n x : Should be a parameter symbol\n alpha1 : Ratio for act1 (Default value = 1)\n alpha2 : Ratio for act2 (Default value = 1)\n kwargs : Any other param will be added as attributes of new activity\n \"\"\"\n res = copyActivity(dbname, act1, name, withExchanges=False, **kwargs)\n\n exch1_by_input = dict({exch['input']: exch for exch in act1.exchangesNp()})\n exch2_by_input = dict({exch['input']: exch for exch in act2.exchangesNp()})\n\n inputs = set(chain(exch1_by_input.keys(), exch2_by_input.keys()))\n\n for input in inputs:\n\n exch1 = exch1_by_input.get(input)\n exch2 = exch2_by_input.get(input)\n exch = exch1 if exch1 else exch2\n\n amount1 = exch1['amount'] if exch1 else 0\n amount2 = exch2['amount'] if exch2 else 0\n\n if exch1 and exch2 and exch1['name'] != exch2['name']:\n raise Exception(\"Input %s refer two different names : %s, %s\" % (input, exch1['name'], exch2['name']))\n\n amount = interpolate(x, x1, x2, amount1 * alpha1, amount2 * alpha2)\n act = getActByCode(*input)\n res.addExchanges({act: dict(amount=amount, name=exch['name'])})\n return res\n\n\n\n\n","sub_path":"lca_algebraic/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":21448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134739108","text":"#User function Template for python3\n\nclass Solution:\n #Function to sort a list using quick sort algorithm.\n def quickSort(self,arr,low,high):\n if(low 
1582:\r\n a = np.floor(y / 100)\r\n b = 2 - a + np.floor(a / 4)\r\n elif month < 10:\r\n pass\r\n elif month > 10:\r\n a = np.floor(y / 100)\r\n b = 2 - a + np.floor(a / 4)\r\n elif day <= 4:\r\n pass\r\n elif day > 14:\r\n a = np.floor(y / 100)\r\n b = 2 - a + np.floor(a / 4)\r\n else:\r\n print('dates specific within 5th - 14th Oct 1582, which is not a valid date for JD conversion')\r\n quit() # exit simulation\r\n\r\n jd = np.floor(365.25 * y + c) + np.floor(30.6001 * (m + 1))\r\n jdn = jd + day + b + 1720994.5\r\n\r\n return jdn\r\n\r\n\r\ndef gast(jdate):\r\n \"\"\" Greenwich apparent sidereal time\r\n\r\n :param jdate: julian date\r\n :return gst: greenwich siderial time\r\n \"\"\"\r\n dtr = np.pi/180 # degrees to radians\r\n atr = dtr/3600 # arc second to radians\r\n\r\n # time arguments\r\n t = (jdate - 2451545) / 36525 # number of julian centuries since 12:00 01 Jan 2000\r\n t2 = t * t\r\n t3 = t * t2\r\n\r\n # fundamental trig arguments (modulo 2pi functions)\r\n l = (dtr * (280.4665 + 36000.7698 * t)) % (2*np.pi)\r\n lp = (dtr * (218.3165 + 481267.8813 * t)) % (2*np.pi)\r\n lraan = (dtr * (125.04452 - 1934.136261 * t)) % (2*np.pi)\r\n\r\n # nutations in longitude and obliquity\r\n dpsi = atr * (-17.2 * np.sin(lraan) - 1.32 * np.sin(2 * l) - 0.23 * np.sin(2 * lp) + 0.21 * np.sin(2 * lraan))\r\n deps = atr * (9.2 * np.cos(lraan) + 0.57 * np.cos(2 * l) + 0.1 * np.cos(2 * lp) - 0.09 * np.cos(2 * lraan))\r\n\r\n # mean and apparent obliquity of the ecliptic\r\n eps0 = (dtr * (23 + 26 / 60 + 21.448 / 3600) + atr * (-46.815 * t - 0.00059 * t2 + 0.001813 * t3)) % (2*np.pi)\r\n obliq = eps0 + deps\r\n\r\n # greenwich mean and apparent sidereal time\r\n gstm = (dtr * (280.46061837 + 360.98564736629 * (jdate - 2451545) + 0.000387933 * t2 - t3 / 38710000)) % (2*np.pi)\r\n gst = (gstm + dpsi * np.cos(obliq)) % (2*np.pi)\r\n\r\n return gst\r\n\r\n\r\n# equations of motion for a spacecraft experiencing J2 only and with no thrust\r\ndef eq_of_motion_noman(y, t, mu, Re, J2, incl):\r\n '''\r\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html\r\n '''\r\n # assuming J2 only and no thrust\r\n\r\n a, raan, aol = y # feeds in current state\r\n\r\n # differential equations\r\n da_dt = 0\r\n draan_dt = (-3/2)*a**(-2)*(a**(-3)*mu)**(1/2)*np.cos(incl)*J2*Re**2*(1+(3/2)*a**(-2)*(1+(-3/2)*np.sin(incl)**2)*J2*Re**2)\r\n daol_dt = (1/64)*a**(-4)*(a**(-3)*mu)**(1/2)*(8*a**2+3*(1+3*np.cos(2*incl))*J2*Re**2)*(8*a**2+3*(3+5*np.cos(2*incl))*J2*Re**2)\r\n\r\n return [da_dt, draan_dt, daol_dt]\r\n\r\n# Fixed time step solve for ODEs\r\ndef odeFixedStepnoman(eq, y0, tmin, tmax, tstep, mu, Re, J2, incl):\r\n \"\"\" Fixed-step solver for ODEs\r\n\r\n :param eq: equations of motion\r\n :param y0: initial state vector\r\n :param tmax: end time\r\n :param tmin: start time\r\n :param tstep: step size\r\n\r\n :return sol: state vector at each time step\r\n :return t: time vector\r\n \"\"\"\r\n\r\n\r\n t = np.linspace(tmin, tmax, int((tmax - tmin)/tstep)) # linspace takes args of : start, stop, and number of divisions\r\n\r\n sol = odeint(eq_of_motion_noman, y0, t, args = (mu, Re, J2, incl)) # call ode solver and pass in necessary arguments for function\r\n\r\n return sol, t\r\n\r\n \r\n# calculate latitude and longitude of sub-satellite point\r\ndef lat_long_ssp_calc(state, ttotal, Re, incl, flattening, rot_rate, jdate_start):\r\n # jdate_start = greg2jd(month, day, year) # calculate julian date of start date for use in gst calculations\r\n # jdate_end = jdate_start + ttotal / 86400 # add on 
manoeuvre time as fraction of day to give Julian date of time of interest NOT NEEDED - want gst of start\r\n\r\n    a_total, RAAN_total, u_total = state\r\n    gst_start = gast(jdate_start) # calculate gst for time of interest\r\n\r\n    lat_ssp_c = np.arcsin(np.sin(incl) * np.sin(u_total)) # calculate geocentric latitude of sub-satellite point\r\n    lat_ssp = np.arctan(np.tan(lat_ssp_c)/(1 - flattening * (2 - flattening))) # convert geocentric latitude of ssp to geodetic\r\n    long_ssp = np.arctan2(np.cos(incl) * np.sin(u_total), np.cos(u_total)) - rot_rate * ttotal + RAAN_total - gst_start # calculate longitude of ssp (need to do gst as function of time....)\r\n\r\n    return (lat_ssp, long_ssp)\r\n\r\n\r\n# calculate haversine curved surface distance between subsatellite point and target\r\ndef find_dist(lat_ssp, long_ssp, lat_poi, long_poi, Re):\r\n\r\n    # calculate curved surface distance from sub-satellite point to target (need to do at each time step for moving target...)\r\n    dist_to_target = 2 * Re * np.arcsin(np.sqrt((np.sin((lat_ssp - lat_poi) / 2))**2 + np.cos(lat_ssp) * np.cos(lat_poi) * (np.sin((long_ssp - long_poi) / 2))**2))\r\n\r\n    return (dist_to_target)\r\n\r\n\r\n# find satellite elevation compared to target\r\ndef find_sat_elevation(a, dist, Re):\r\n    angle_between_ssp_and_tar = dist / Re # assuming the Earth is a sphere, calculate the angle between the line from the Earth's centre to the subsatellite point and the line from the Earth's centre to the target. Treat the distance as an arc and the Earth's radius as the radius of the sector.\r\n\r\n    earth_angular_radius = np.arcsin(Re / a)\r\n    nadir_angle = np.arctan((np.sin(earth_angular_radius) * np.sin(angle_between_ssp_and_tar)) / (1 - np.sin(earth_angular_radius) * np.cos(angle_between_ssp_and_tar)))\r\n\r\n    elevation = np.pi / 2 - angle_between_ssp_and_tar - nadir_angle\r\n    return elevation\r\n","sub_path":"supporting_funcs.py","file_name":"supporting_funcs.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"486555480","text":"\"\"\"Chronophore is a simple time-tracking program. It keeps track of\nusers' hours as they sign in and out.
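The `find_dist` helper above is the haversine great-circle formula with both half-angle terms written inline. A minimal standalone sketch of the same computation, using only the `math` module and an assumed mean Earth radius of 6371 km (the radius and the test coordinates are illustrative, not taken from the record):

```python
import math

def haversine(lat1, lon1, lat2, lon2, radius_km=6371.0):
    """Great-circle distance between two points given in radians."""
    h = (math.sin((lat1 - lat2) / 2) ** 2
         + math.cos(lat1) * math.cos(lat2) * math.sin((lon1 - lon2) / 2) ** 2)
    return 2 * radius_km * math.asin(math.sqrt(h))

# London (51.5 N, 0.1 W) to Paris (48.9 N, 2.4 E): roughly 340 km
coords = map(math.radians, (51.5, -0.1, 48.9, 2.4))
print(f"{haversine(*coords):.0f} km")
```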
Data is stored in a\nhuman-readable json file.\n\nThis project was started to help keep track of students signing in\nand out at a tutoring program in a community college, but should be\nadaptable to other use cases.\n\"\"\"\nfrom sqlalchemy.orm import sessionmaker\n\n__title__ = 'chronophore'\n__version__ = '0.6.0'\n__license__ = 'MIT'\n__author__ = 'Amin Mesbah'\n__email__ = 'mesbahamin@gmail.com'\n__description__ = 'Desktop app for tracking sign-ins and sign-outs in a tutoring center.'\n\nSession = sessionmaker()\n","sub_path":"chronophore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"277373259","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalog', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='addsize',\n options={},\n ),\n migrations.AlterField(\n model_name='addsize',\n name='name',\n field=models.CharField(max_length=1, verbose_name=b'\\xd0\\xa0\\xd0\\xb0\\xd0\\xb7\\xd0\\xbc\\xd0\\xb5\\xd1\\x80', choices=[(b'1', b'50'), (b'2', b'56'), (b'3', b'62'), (b'4', b'68'), (b'5', b'74'), (b'6', b'80'), (b'7', b'86'), (b'8', b'92'), (b'9', b'98'), (b'10', b'104'), (b'11', b'110'), (b'12', b'116'), (b'13', b'122'), (b'14', b'128'), (b'15', b'134'), (b'16', b'140'), (b'17', b'146')]),\n ),\n migrations.AlterField(\n model_name='addsize',\n name='size',\n field=models.ForeignKey(to='catalog.Product'),\n ),\n ]","sub_path":"catalog/migrations/0002_auto_20160112_1645.py","file_name":"0002_auto_20160112_1645.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"86794490","text":"\n\nfrom xai.brain.wordbase.nouns._rug import _RUG\n\n#calss header\nclass _RUGS(_RUG, ):\n\tdef __init__(self,): \n\t\t_RUG.__init__(self)\n\t\tself.name = \"RUGS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"rug\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_rugs.py","file_name":"_rugs.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221244440","text":"# reducer code\n\nimport sys\nimport pickle\nimport pandas as pd\nfrom io import StringIO\nfrom sklearn.feature_extraction.text import CountVectorizer\n\ndir_root = './datascience/'\nfilename = dir_root + 'models/dtc.mlmodel'\nmodel = pickle.load(open(filename, 'rb'))\n\ndef classify():\n for line in sys.stdin:\n line = line.strip()\n cols = line.split('\\t')\n if len(cols) < 4:\n continue\n (uid, purchases, mean, stddev) = (cols[0], int(cols[1]), float(cols[2]), float(cols[3]))\n prediction = model.predict([[purchases, mean, stddev]])\n print(uid + '\\t' + str(prediction[0]))\n\nif __name__ == \"__main__\":\n classify()\n","sub_path":"dtc_reducer.py","file_name":"dtc_reducer.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"41574914","text":"from flask import Flask, jsonify\nfrom flask import request\nfrom build_data import build_rule_kcs_dict, load_kcs_case_dict, build_case_pairs\nfrom run_classifier import load_model, do_predict, flags, tf\nimport json\n\napp = Flask(__name__)\n\ncustomer_inputs = [\n]\n\nestimator = load_model()\n\n\ndef 
handle_rule_prioritization(customer_input, hit_rules):\n rk_dict = build_rule_kcs_dict()\n kcs_cases_df = load_kcs_case_dict()\n test_df = build_case_pairs(rk_dict, kcs_cases_df, customer_input, hit_rules)\n result_df = do_predict(estimator, test_df)\n result_df = get_filter_rank_rules(result_df)\n return result_df\n\n\ndef get_filter_rank_rules(result_df):\n THRESHOLD = 0.2\n kcs_rule_dict = json.load(open(\"./data/kcs_rule_dict.json\"))\n filter_df = result_df.groupby(['caseb_kcs'])['predict_value'].agg(['mean']).reset_index()\n # filter_df[filter_df['mean'] > 0.2]\n filter_df['rule'] = filter_df.apply(lambda x: kcs_rule_dict[x['caseb_kcs']], axis=1)\n filter_df = filter_df[['rule', 'mean']]\n filter_df.columns = ['rule', 'score']\n filter_df['show'] = filter_df.apply(lambda x: True if x['score'] >= 0.2 else False, axis=1)\n filter_df = filter_df.sort_values(by=['score'], ascending=False)\n return filter_df\n\n\n@app.route('/customer_inputs', methods=['GET'])\ndef get_customer_inputs():\n return jsonify({'customer_inputs': customer_inputs})\n\n\n@app.route('/customer_inputs', methods=['POST'])\ndef create_customer_inputs_():\n if not request.json or not 'customer_input' in request.json or not 'hit_rules' in request.json:\n abort(400)\n input = {\n 'customer_input_id': request.json['customer_input_id'],\n 'customer_input': request.json['customer_input'],\n 'description': request.json.get('description', \"\"),\n 'hit_rules': request.json.get('hit_rules', []),\n }\n result_df = handle_rule_prioritization(input['customer_input'], input['hit_rules'])\n input['rank_result'] = result_df.to_json(orient='records')\n customer_inputs.append(input)\n\n return jsonify(input['rank_result']), 201\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"data_dir\")\n flags.mark_flag_as_required(\"task_name\")\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.logging.set_verbosity(tf.logging.ERROR)\n app.run(host='0.0.0.0',port=8090)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"420783053","text":"\"\"\"\nLab1starter_ OCSbent.py\n\nThis code uses Psi4 to calculate the energy and optimized bond lengths\nof an OCS molecule constrained to have a bend angle of 120 deg.\n\nThis is a starter code for Lab1.\n\nIn this example, a Z-matrix is cut/paste into the code. To see an\nexample of loading the Z-matrix from a file, check out Lab2starter_OCS.py\n\nOrigin CHE 525, S21 Problem Development\nAuthor: Tom Allison\n\"\"\"\n\n#%%\n# Import modules =======================================================\nimport psi4\nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#%%\n# Set up Psi4 ==========================================================\n\npsi4.core.clean() # Reset Psi4 to default startup settings in case anything lingers.\npsi4.core.clean_options()\npsi4.set_memory('4000 MB') # Can make this much larger on Seawulf, each compute node has more than 100 GB RAM\npsi4.set_num_threads(4) # Can make this much larger on Seawulf, each compute node can support 28 threads.\n # But it doesn't help much for small molecules...\npsi4.core.set_output_file('Lab1starter_OCSbent.dat', False) #this command sets psi4's output to a file. 
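The `get_filter_rank_rules` function above reduces pair-level predictions to one score per rule via a groupby mean, flags each score against a threshold, and sorts descending. A self-contained sketch of that pattern on toy data (the column names follow the record; the values and threshold use are invented, and note the record defines THRESHOLD but compares against the literal 0.2):

```python
import pandas as pd

result_df = pd.DataFrame({
    'caseb_kcs': ['k1', 'k1', 'k2', 'k2', 'k3'],
    'predict_value': [0.9, 0.7, 0.1, 0.3, 0.25],
})

THRESHOLD = 0.2
ranked = (result_df.groupby('caseb_kcs')['predict_value']
          .mean()
          .reset_index(name='score'))
ranked['show'] = ranked['score'] >= THRESHOLD          # one boolean flag per rule
ranked = ranked.sort_values(by='score', ascending=False)
print(ranked)
```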
Comment this line out if you want to see the output on the terminal.\n\n#%%\n# Do Calculation =======================================================\n\n# example of loading geometry from Z-matrix cut/paste\n# The psi4.geometry below creates a new molecule object representing an\n# OCS molecule with a 120 degree bend angle. \nOCS_120 = psi4.geometry(\"\"\"\n o\n c 1 co2 \n s 2 sc3 1 sco3 \n \nco2= 1.380000\nsc3= 1.780000\nsco3= 120.000\n\"\"\")\n\nOCS_120.reset_point_group('c1') # turn symmetry off!\n\nmethod1 = 'scf/3-21G' # store methods as string variables for later repeated use.\nmethod2 = 'scf/6-311G*'\n\nE0_120_1 = psi4.energy(method1, molecule = OCS_120) # calculate energy using method1\nE0_120_2 = psi4.energy(method2, molecule = OCS_120) # calculate energy using method2\n\n# Optimize with constrained bend angle.\npsi4.set_module_options('optking',{'frozen_bend': '1 2 3'}) # This command fixes the bend angle of the OCS molecule.\nEopt_120, wfn_opt120 = psi4.optimize(method2, molecule = OCS_120, return_wfn = True) # optimize geometry of molecule with previously defined constraint, return wave function to make .molden file.\npsi4.driver.molden(wfn_opt120, 'OCS_120.molden') # write constrained optimization results to a .molden file.\n\n# Now turn optimization constraint off and reoptimize to find the global energy minimum\npsi4.core.clean_options() # turn off frozen bend and any other options\nEopt, wfn_opt = psi4.optimize(method2, molecule = OCS_120, return_wfn = True)\n\n#%%\n# Print results to screen =======================================================\n\nprint('Energies at initial positions with different methods:')\nprint()\nprint('Energy at initial geometry with ' + method1 + ' is ' + str(E0_120_1) + ' Hartrees')\nprint('Energy at initial geometry with ' + method2 + ' is ' + str(E0_120_2) + ' Hartrees')\nprint()\nprint('Energy after constrained optimization with ' + method2 + ' is ' + str(Eopt_120) + ' Hartrees')\nprint('Energy after global optimization with ' + method2 + ' is ' + str(Eopt) + ' Hartrees')\n\n#%%\n# Save data to a spreadsheet using Pandas ==========================================\n#\n# d = {'Details': [method1, method2, 'Constrained opt.', 'Global opt.'], 'Energies': [E0_120_1, E0_120_2, Eopt_120, Eopt]}\n# df = pd.DataFrame(data = d)\n# df.to_csv('Lab1starter_OCSbent_results.csv') # save dataframe to .csv spreadsheet file.\n\n# Or of course you can also extract the results from the raw .dat file\n# generated by Psi4.\n\n","sub_path":"Lab1/Lab1starter_OCSbent.py","file_name":"Lab1starter_OCSbent.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"40615677","text":"\"\"\"Running in DeepMind Control Suite Env\"\"\"\nimport random\nimport torch\nimport argparse\nimport os\nimport time\nimport numpy as np\n# -------------------------------\nfrom DDPG import DDPG\n# -------------------------------\nfrom TD3 import TD3\n# -------------------------------\nfrom SAC import SAC\nfrom SAC import SAC_adjusted_temperature\n# -------------------------------\nfrom utils import replay_buffer\nimport environments\n# Tag loggers\nfrom spinupUtils.logx import EpochLogger\nfrom spinupUtils.run_utils import setup_logger_kwargs\n\ndef test_agent(policy, eval_env, logger, eval_episodes=10):\n\tfor _ in range(eval_episodes):\n\t\tepisode_timesteps = 0\n\t\tstate, done, ep_ret, ep_len = eval_env.reset(), False, 0, 0\n\t\twhile not done:\n\t\t\tepisode_timesteps += 1\n\t\t\tif
args.policy.startswith(\"SAC\"):\n\t\t\t\taction = policy.select_action(np.array(state), deterministic=True)\n\t\t\telse:\n\t\t\t\taction = policy.select_action(np.array(state))\n\t\t\tstate, reward, done, _ = eval_env.step(action)\n\t\t\ttimeout_done = (episode_timesteps == env.max_episode_steps)\n\t\t\tdone = timeout_done or done\n\t\t\tep_ret += reward\n\t\t\tep_len += 1\n\t\tlogger.store(TestEpRet=ep_ret, TestEpLen=ep_len)\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--policy\", default=\"SAC\", type=str) # Policy name\n\tparser.add_argument(\"--env\", default=\"cheetah-run\", type=str) # DeepMind Control Suite environment name\n\tparser.add_argument(\"--seed\", default=0, type=int) # Sets DeepMind Control Suite env, PyTorch and Numpy seeds\n\tparser.add_argument(\"--start_timesteps\", default=25e3, type=int) # Time steps initial random policy is used\n\tparser.add_argument(\"--eval_freq\", default=5e3, type=int) # How often (time steps) we evaluate\n\tparser.add_argument(\"--save_freq\", default=4, type=int) # How often (evaluation steps) we save the model\n\tparser.add_argument(\"--max_timesteps\", default=3e6, type=int) # Max time steps to run environment\n\tparser.add_argument(\"--expl_noise\", default=0.1, type=float) # Std of Gaussian exploration noise\n\tparser.add_argument(\"--batch_size\", default=256, type=int) # Batch size for both actor and critic\n\tparser.add_argument(\"--discount\", default=0.99, type=float) # Discount factor\n\tparser.add_argument(\"--tau\", default=0.005, type=float) # Target network update rate\n\tparser.add_argument(\"--policy_noise\", default=0.2, type=float) # Noise added to target policy during critic update\n\tparser.add_argument(\"--noise_clip\", default=0.5, type=float) # Range to clip target policy noise\n\tparser.add_argument(\"--policy_freq\", default=2, type=int) # Frequency of delayed policy updates\n\tparser.add_argument(\"--alpha\", default=0.2, type=float) # For sac entropy\n\tparser.add_argument(\"--save_model\", action=\"store_true\") # Save model and optimizer parameters\n\tparser.add_argument(\"--exp_name\", type=str) \t\t\t\t # Name for algorithms\n\targs = parser.parse_args()\n\n\tfile_name = f\"{args.policy}_{args.env}_s{args.seed}\"\n\tprint(f\"---------------------------------------\")\n\tprint(f\"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}\")\n\tprint(f\"---------------------------------------\")\n\n\t# Make envs\n\tenv = environments.ControlSuite(args.env)\n\teval_env = environments.ControlSuite(args.env)\n\n\t# Set seeds\n\tenv.seed(args.seed)\n\teval_env.seed(args.seed) # eval env for evaluating the agent\n\ttorch.manual_seed(args.seed)\n\tif torch.cuda.is_available():\n\t\ttorch.cuda.manual_seed_all(args.seed)\n\tnp.random.seed(args.seed)\n\trandom.seed(args.seed)\n\t\n\tstate_dim = env.observation_space.shape[0]\n\taction_dim = env.action_space.shape[0] \n\tmax_action = float(env.action_space.high[0])\n\n\tkwargs = {\n\t\t\"state_dim\": state_dim,\n\t\t\"action_dim\": action_dim,\n\t\t\"max_action\": max_action,\n\t\t\"discount\": args.discount,\n\t\t\"tau\": args.tau,\n\t}\n\n\t# Initialize policy\n\t# ----------------------------------------------\n\tif args.policy == \"DDPG\":\n\t\t# if the formal argument defined in function `DDPG()` are regular params, can pass `**-styled` actual argument.\n\t\tpolicy = DDPG.DDPG(**kwargs)\n\t# ---------------------------------------------------\n\telif args.policy == \"TD3\":\n\t\t# Target policy smoothing is scaled 
wrt the action scale\n\t\tkwargs[\"policy_noise\"] = args.policy_noise * max_action\n\t\tkwargs[\"noise_clip\"] = args.noise_clip * max_action\n\t\tkwargs[\"policy_freq\"] = args.policy_freq\n\t\tpolicy = TD3.TD3(**kwargs)\n\t# ----------------------------------------------\n\telif args.policy == \"SAC\":\n\t\tkwargs[\"alpha\"] = args.alpha\n\t\tpolicy = SAC.SAC(**kwargs)\n\telif args.policy == \"SAC_adjusted_temperature\":\n\t\tpolicy = SAC_adjusted_temperature.SAC(**kwargs)\n\telse:\n\t\traise ValueError(f\"Invalid Policy: {args.policy}!\")\n\n\tif args.save_model and not os.path.exists(f\"./models/{file_name}\"):\n\t\tos.makedirs(f\"./models/{file_name}\")\n\n\t# Setup loggers\n\tlogger_kwargs = setup_logger_kwargs(args.exp_name, args.seed, datestamp=False)\n\tlogger = EpochLogger(**logger_kwargs)\n\n\t_replay_buffer = replay_buffer.ReplayBuffer(state_dim, action_dim)\n\t\n\tstate, done = env.reset(), False\n\tepisode_reward = 0\n\tepisode_timesteps = 0\n\tepisode_num = 0\n\tstart_time = time.time()\n\n\tfor t in range(int(args.max_timesteps)):\n\t\tepisode_timesteps += 1\n\n\t\t# Select action randomly or according to policy\n\t\tif t < int(args.start_timesteps):\n\t\t\taction = env.action_space.sample()\n\t\telse:\n\t\t\tif args.policy.startswith(\"SAC\"):\n\t\t\t\taction = policy.select_action(np.array(state))\n\t\t\telse:\n\t\t\t\taction = (\n\t\t\t\t\tpolicy.select_action(np.array(state))\n\t\t\t\t\t+ np.random.normal(0, max_action * args.expl_noise, size=action_dim)\n\t\t\t\t).clip(-max_action, max_action)\n\n\t\t# Perform action\n\t\tnext_state, reward, done, _ = env.step(action)\n\n\t\t# If env stops when reaching max-timesteps, then `done_bool = False`, else `done_bool = True`\n\t\ttimeout_done = (episode_timesteps == env.max_episode_steps)\n\t\tdone = timeout_done or done\n\t\tdone_bool = float(done) if not timeout_done else 0 \n\n\t\t# Store data in replay buffer\n\t\t_replay_buffer.add(state, action, next_state, reward, done_bool)\n\n\t\tstate = next_state\n\t\tepisode_reward += reward\n\n\t\t# Train agent after collecting sufficient data\n\t\tif t >= int(args.start_timesteps):\n\t\t\tpolicy.train(_replay_buffer, args.batch_size)\n\n\t\tif done: \n\t\t\tprint(f\"Total T: {t+1}, Episode Num: {episode_num+1}, Episode T: {episode_timesteps}, Reward: {episode_reward:.3f}\")\n\t\t\tlogger.store(EpRet=episode_reward, EpLen=episode_timesteps)\n\t\t\t# Reset environment\n\t\t\tstate, done = env.reset(), False\n\t\t\tepisode_reward = 0\n\t\t\tepisode_timesteps = 0\n\t\t\tepisode_num += 1 \n\n\t\tif (t + 1) % args.eval_freq == 0:\n\t\t\ttest_agent(policy, eval_env, logger)\n\t\t\tif args.save_model and (t + 1) % int(args.eval_freq * args.save_freq) == 0: \n\t\t\t\tpolicy.save(f\"./models/{file_name}/{t+1}_steps\")\n\t\t\tlogger.log_tabular(\"EpRet\", with_min_and_max=True)\n\t\t\tlogger.log_tabular(\"TestEpRet\", with_min_and_max=True)\n\t\t\tlogger.log_tabular(\"EpLen\", average_only=True)\n\t\t\tlogger.log_tabular(\"TestEpLen\", average_only=True)\n\t\t\tlogger.log_tabular(\"TotalEnvInteracts\", t+1)\n\t\t\tlogger.log_tabular(\"Time\", time.time()-start_time)\n\t\t\tlogger.dump_tabular()","sub_path":"main_off_policy_dmc.py","file_name":"main_off_policy_dmc.py","file_ext":"py","file_size_in_byte":7064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470170306","text":"def readNumber(line, index):\n number = 0\n while index < len(line) and line[index].isdigit():\n number = number * 10 + int(line[index])\n index += 1\n if index < 
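In the training loop above, `done_bool` is zeroed when an episode ends only because `episode_timesteps` hit the environment's step limit, so the stored transition still bootstraps through time-limit truncations. Here is that distinction in isolation, as a sketch rather than the record's code:

```python
def stored_done(terminated: bool, episode_steps: int, max_steps: int) -> float:
    """Done flag to write into the replay buffer.

    A time-limit truncation is not a real terminal state, so the TD
    target should still bootstrap from the next state (flag = 0.0).
    """
    timeout = episode_steps == max_steps
    return 0.0 if timeout else float(terminated)

assert stored_done(True, 57, 1000) == 1.0    # genuine terminal state
assert stored_done(True, 1000, 1000) == 0.0  # episode cut off by the time limit
assert stored_done(False, 42, 1000) == 0.0   # episode still running
```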
len(line) and line[index] == '.':\n index += 1\n keta = 0.1\n while index < len(line) and line[index].isdigit():\n number += int(line[index]) * keta\n keta /= 10\n index += 1\n token = {'type': 'NUMBER', 'number': number}\n return token, index\n\n\ndef readPlus(line, index):\n token = {'type': 'PLUS'}\n return token, index + 1\n\n\ndef readMinus(line, index):\n token = {'type': 'MINUS'}\n return token, index + 1\n\n\ndef readMult(line, index):\n token = {'type': 'MULT'}\n return token, index + 1\n\n\ndef readDiv(line, index):\n token = {'type': 'DIV'}\n return token, index + 1\n\n\ndef readLPar(line, index):\n token = {'type': 'LPAR'}\n return token, index + 1\n\n\ndef readRPar(line, index):\n token = {'type': 'RPAR'}\n return token, index + 1\n\n\ndef tokenize(line):\n tokens = []\n index = 0\n while index < len(line):\n if line[index].isdigit():\n (token, index) = readNumber(line, index)\n elif line[index] == '+':\n (token, index) = readPlus(line, index)\n elif line[index] == '-':\n (token, index) = readMinus(line, index)\n elif line[index] == '*':\n (token, index) = readMult(line, index)\n elif line[index] == '/':\n (token, index) = readDiv(line, index)\n elif line[index] == '(':\n (token, index) = readLPar(line, index)\n elif line[index] == ')':\n (token, index) = readRPar(line, index)\n else:\n print('Invalid character found: ' + line[index])\n exit(1)\n tokens.append(token)\n return tokens\n\n\n# handle parentheses (returns updated tokens)\ndef evaluate_Par(tokens):\n index = 0\n rpar_index = len(tokens)\n have_lpar = []\n have_rpar = []\n # indexes of '(' and ')' in tokens\n for i in range(len(tokens)):\n if tokens[i]['type'] == 'LPAR':\n have_lpar.append(i)\n elif tokens[i]['type'] == 'RPAR':\n have_rpar.append(i)\n if len(have_lpar) != len(have_rpar): # if the number of '(' and ')' are unequal\n print('Invalid syntax 1')\n exit(1)\n popcount = 0 # to renew indexes after updating tokens\n while have_lpar and have_rpar:\n lpar_index = have_lpar.pop()\n rpar_index = [x for x in have_rpar if x > lpar_index]\n rpar_index = rpar_index[0]\n have_rpar.remove(rpar_index)\n rpar_index -= popcount # renew index\n popcount = 0\n # evaluate tokens inside parentheses and update tokens\n inpar_tokens = tokens[lpar_index+1:rpar_index]\n tokens[lpar_index] = {'type': 'NUMBER', 'number': evaluate_normal(inpar_tokens)}\n del tokens[lpar_index+1:rpar_index+1]\n # check if index should be updated\n if have_lpar:\n next_lpar = have_lpar[len(have_lpar)-1]\n next_rpar = [x for x in have_rpar if x > next_lpar]\n if rpar_index < next_rpar[0]:\n popcount = rpar_index - lpar_index\n return tokens\n\n\n# multiplication and division (returns updated tokens)\ndef evaluate_MD(tokens):\n index = 1\n while index < len(tokens):\n if tokens[index]['type'] == 'NUMBER':\n if tokens[index - 1]['type'] == 'MULT':\n tokens[index - 2]['number'] = tokens[index - 2]['number'] * tokens[index]['number']\n tokens.pop(index)\n tokens.pop(index - 1)\n index -= 1\n elif tokens[index - 1]['type'] == 'DIV':\n if tokens[index]['number'] != 0:\n tokens[index - 2]['number'] = tokens[index - 2]['number'] / tokens[index]['number']\n tokens.pop(index)\n tokens.pop(index - 1)\n index -= 1\n else:\n print('Cannot divide by ZERO')\n exit(1)\n elif tokens[index - 1]['type'] == 'PLUS' or tokens[index - 1]['type'] == 'MINUS':\n index += 1\n else:\n print('Invalid syntax 3')\n exit(1)\n index += 1\n return tokens\n\n\n# plus and minus\ndef evaluate_PM(tokens):\n answer = 0\n tokens.insert(0, {'type': 'PLUS'}) # Insert a dummy '+' token\n index 
= 1\n while index < len(tokens):\n if tokens[index]['type'] == 'NUMBER':\n if tokens[index - 1]['type'] == 'PLUS':\n answer += tokens[index]['number']\n elif tokens[index - 1]['type'] == 'MINUS':\n answer -= tokens[index]['number']\n else:\n print('Invalid syntax 4')\n exit(1)\n index += 1\n return answer\n\n\n# normal + - * / calculator\ndef evaluate_normal(tokens):\n return evaluate_PM(evaluate_MD(tokens))\n\n\ndef test(line):\n tokens = tokenize(line)\n tokens = evaluate_Par(tokens)\n actualAnswer = evaluate_normal(tokens)\n expectedAnswer = eval(line)\n if abs(actualAnswer - expectedAnswer) < 1e-8:\n print(\"PASS! (%s = %f)\" % (line, expectedAnswer))\n else:\n print(\"FAIL! (%s should be %f but was %f)\" % (line, expectedAnswer, actualAnswer))\n\n\n# Add more tests to this function :)\ndef runTest():\n print(\"==== Test started! ====\")\n test(\"1+2\")\n test(\"1.0+2.1-3\")\n test(\"1\")\n test(\"0*77\")\n test(\"1*5.3\")\n test(\"1.5*4\")\n test(\"1.5*10/3\")\n test(\"5-1.5*10/3\")\n test(\"2-1.5*10-6/30\")\n test(\"1.4/0.7\")\n test(\"(1+5)\")\n test(\"3*(1+5)\")\n test(\"3*(1+5/4)\")\n test(\"100*((7-5*4-3/92)/6)+7/5\")\n test(\"(5+4*2)/3-2*(4+5)\")\n test(\"(1+5)/3-4*(6*(12/7+8))+15\")\n test(\"((1+2)*3)/4-7*2+(4+3)*30\")\n test(\"1+5/675/34*(2+(3/4)+(5-7*34)-2)\")\n # test(\"3-9/0\")\n # test(\"2/0\")\n # test(\"4^5\")\n print(\"==== Test finished! ====\\n\")\n\nrunTest()\n\nwhile True:\n print('> ', end=\"\")\n line = input().replace(\" \", \"\")\n tokens = tokenize(line)\n tokens = evaluate_Par(tokens)\n answer = evaluate_normal(tokens)\n print(\"answer = %f\\n\" % answer)\n","sub_path":"ex03/calculator_modularize.py","file_name":"calculator_modularize.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"429496036","text":"\"\"\"\nString and data utils, where implementation differs between Python 2 & 3\n\"\"\"\nimport sys\nfrom copy import deepcopy\nimport os\n\nPY_MAJOR, PY_MINOR = sys.version_info[:2]\n\nif PY_MAJOR >= 3:\n from . import utils3 as utils_mod\nelse:\n from . 
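The `evaluate_Par` helper earlier in this record rejects expressions where the counts of '(' and ')' differ, but equal counts alone would still accept a misordered pair such as ")(" before evaluation. A single-pass depth check closes that gap; this is a sketch, not part of the original calculator:

```python
def parens_balanced(expr: str) -> bool:
    """True when every ')' closes an earlier '(' and none stay open."""
    depth = 0
    for ch in expr:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth < 0:   # ')' arrived before its '('; counts alone miss this
                return False
    return depth == 0

assert parens_balanced("3*(1+5/4)")
assert not parens_balanced(")(")     # equal counts, still invalid
assert not parens_balanced("(1+2")
```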
import utils2 as utils_mod\n\nSTR2BYTES = utils_mod.STR2BYTES\nBYTES2STR = utils_mod.BYTES2STR\nNULLCHAR = utils_mod.NULLCHAR\nNULLCHAR_2 = utils_mod.NULLCHAR_2\nstrjoin = utils_mod.strjoin\nis_string = utils_mod.is_string\nis_string_or_bytes = utils_mod.is_string_or_bytes\nascii_string = utils_mod.ascii_string\n\nmemcopy = deepcopy\nif PY_MAJOR == 2 and PY_MINOR == 5:\n def memcopy(a):\n return a\n\ndef clib_search_path(lib):\n '''Assemble path to c library.\n\n Parameters\n ----------\n lib : str\n Either 'ca' or 'Com'.\n\n Returns\n --------\n str : string\n\n Examples\n --------\n >>> clib_search_path('ca')\n 'linux64/libca.so'\n\n '''\n\n # determine which libca / libCom dll is appropriate\n try:\n import platform\n nbits = platform.architecture()[0]\n except:\n nbits = '32bit'\n nbits = nbits.replace('bit', '')\n\n libfmt = 'lib%s.so'\n if os.name == 'nt':\n libsrc = 'win'\n libfmt = '%s.dll'\n elif sys.platform == 'darwin':\n libsrc = 'darwin'\n libfmt = 'lib%s.dylib'\n elif sys.platform.startswith('linux'):\n libsrc = 'linux'\n else:\n return None\n\n return os.path.join(\"%s%s\" % (libsrc, nbits), libfmt % lib)\n","sub_path":"epics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67387053","text":"from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\n# from django.views.generic import CreateView\n# from desenvolvimento.forms import Projet oForm\nfrom .models import *\n\n#\n# def index(request):\n# return render_to_response('index.html', {})\n\n\ndef index(request):\n listadeProjetos = Projeto.objects.all().order_by('-datadeFim')[:5]\n t =loader.get_template('index.html')\n c = RequestContext(request, {\n 'listadeProjetos': listadeProjetos,\n })\n return HttpResponse(t.render(c))\n\ndef curso(request):\n listaDeCursos = Curso.objects.all().order_by('-nome')\n t = loader.get_template('curso.html')\n c = RequestContext(request, {\n 'listaDeCursos': listaDeCursos,\n })\n return HttpResponse(t.render(c))\n\ndef professor(request):\n listasdeFunc = Professor.objects.all().order_by('nome')\n\n t = loader.get_template('professor.html')\n c = RequestContext(request, {\n 'listasdeProf': listasdeFunc,\n })\n return HttpResponse(t.render(c))\n\ndef eventos(request):\n return render_to_response('eventos.html', {})\n\n\n\n\n# class ProjetoView(CreateView):\n# #template_name = 'form.html'\n# template_name = 'index.html'\n# model = Projeto\n# form_class = ProjetoForm\n# # success_url = '/envie/?sucesso=1#sucesso'\n#\n#\n# #\n# # def pesquisa(request):\n# if request.method=='POST':\n# form = searchform(request.get)\n# if form.is_valid():\n# form.save()\n# return render_to_response('pessoa/concluido.html',\n# context_instance = RequestContext(request))\n# else:\n# form = PessoaForm()\n# return render_to_response('pessoa/cadastro.html', {'form': form,},\n# context_instance = RequestContext(request))\n# # def search(request):\n\n\n#\n# def buscar(request):\n# pass\n\n#\n# def view_generica(request, *args, **kwargs):\n# if request.GET['search']:\n# view_certa = buscar(request)\n# else:\n# return HttpResponse(\"Erro\")\n#\n# return view_certa(request, *args, 
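`clib_search_path` above derives the shared-library file name from `os.name` and `sys.platform`. The same dispatch, condensed into a standalone sketch (same three platform families as the record; the bitness handling is simplified):

```python
import os
import platform
import sys

def lib_filename(stem: str) -> str:
    """Map a library stem such as 'ca' to its platform-specific file name."""
    if os.name == 'nt':
        return '%s.dll' % stem           # Windows
    if sys.platform == 'darwin':
        return 'lib%s.dylib' % stem      # macOS
    return 'lib%s.so' % stem             # assume ELF everywhere else

nbits = platform.architecture()[0].replace('bit', '')
print(nbits, lib_filename('ca'))         # e.g. "64 libca.so" on Linux
```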
**kwargs)\n\n","sub_path":"desenvolvimento/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225763706","text":"import torch\nimport torch.nn as nn\nfrom torch.utils import data\nfrom torchvision import transforms\nimport torch.optim as optim\nimport numpy as np\nimport os\nimport argparse\nimport time\nimport torchvision\nfrom model import MyDataset, MLP_Dataset, LSTM_Dataset, autoencoder, autoencoder_B, MLP, Unet, LSTM, LSTM_B, AE_3D_Dataset, autoencoder_3D\nfrom train import training, validation\nfrom utils import load_transfer_learning, insert_time_channel\nimport warnings\nimport pdb\n\nif __name__ == '__main__':\n\n #arguments for num_epochs and batch_size\n parser = argparse.ArgumentParser()\n parser.add_argument(dest='arg1', type=int, help=\"Number of Epochs\")\n parser.add_argument(dest='arg2', type=int, default=16, help=\"Batch Size\")\n\n args = parser.parse_args()\n num_epochs = args.arg1\n batch_size = args.arg2\n\n print(num_epochs, batch_size)\n\n #Making folders to save reconstructed images, input images and weights\n if not os.path.exists(\"../output\"):\n os.mkdir(\"../output\")\n\n if not os.path.exists(\"../input\"):\n os.mkdir(\"../input\")\n\n if not os.path.exists(\"../weights\"):\n os.mkdir(\"../weights\")\n\n warnings.filterwarnings('ignore')\n\n #Running the model on CUDA\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n u = np.load('../data/cylinder_u.npy', allow_pickle=True)[:-1, ...]\n # print(u.shape[0])\n # u = np.load('../data/boussinesq_u.npy', allow_pickle=True)\n print('Data loaded')\n\n #train/val split\n train_to_val = 0.85\n # rand_array = np.random.permutation(1500)\n # print(rand_array)\n\n u_train = u[:int(train_to_val*u.shape[0]), ...]\n u_validation = u[int(train_to_val*u.shape[0]):, ...]\n\n print(u_train.shape)\n print(u_validation.shape)\n\n # u = insert_time_channel(u, 10)\n # print(u.shape);\n\n img_transform = transforms.Compose([\n # transforms.ToPILImage(),\n # transforms.RandomVerticalFlip(p=0.5),\n transforms.ToTensor(),\n # transforms.Normalize([0.5], [0.5])\n ])\n\n # batch_size = 16\n #Train data_loader\n train_dataset = AE_3D_Dataset(u_train, transform=img_transform)\n train_loader_args = dict(batch_size=batch_size, shuffle=True, num_workers=4)\n train_loader = data.DataLoader(train_dataset, **train_loader_args)\n\n # print(len(train_loader))\n \n #val data_loader\n validation_dataset = AE_3D_Dataset(u_validation, transform=img_transform)\n val_loader_args = dict(batch_size=1, shuffle=False, num_workers=4)\n val_loader = data.DataLoader(validation_dataset, **val_loader_args)\n\n #Loading Model\n TL = False\n if TL:\n final_model = LSTM()\n pretrained = autoencoder()\n PATH = \"../weights/1000.pth\"\n # PATH = \"../weights/bous_500.pth\"\n # pdb.set_trace()\n model = load_transfer_learning(pretrained, final_model, PATH)\n else:\n model = autoencoder_3D()\n\n model=model.to(device)\n\n #Instances of optimizer, criterion, scheduler\n\n optimizer = optim.Adam(model.parameters(), lr=0.1)\n criterion=nn.L1Loss()\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', \n factor=0.1, patience=5, verbose=False, \n threshold=1e-3, threshold_mode='rel', \n cooldown=5, min_lr=1e-5, eps=1e-08)\n\n #Epoch loop\n for epoch in range(num_epochs):\n start_time=time.time()\n print('Epoch no: ',epoch)\n train_loss = training(model,train_loader,criterion,optimizer)\n \n #Saving weights after 
every 20epochs\n if epoch%50==0 and epoch !=0:\n output=validation(model,val_loader,criterion)\n name='../output/'+str(epoch) +'.npy' \n #name_in='../input/'+str(epoch) +'.npy' \n np.save(name,output)\n del output\n # np.save(name_in,inp)\n\n if epoch%20==0:\n path='../weights/'+ str(epoch) +'_t.pth'\n torch.save(model.state_dict(),path)\n print(optimizer)\n \n scheduler.step(train_loss)\n print(\"Time : \",time.time()-start_time)\n print('='*50)","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"540539288","text":"import marshal\nimport multiprocessing\nimport traceback\nimport sys\n\nfrom pyaloha.protocol.base import WorkerResults, custom_loads\n\nif sys.version_info[0] == 3:\n from collections.abc import Iterable, Mapping\nelse:\n from collections import Iterable, Mapping\n\n\nclass MarshalWorkerResults(WorkerResults):\n @classmethod\n def to_basic_types(cls, obj):\n if hasattr(obj, '__dumpdict__'):\n obj = obj.__dumpdict__()\n elif isinstance(obj, Mapping):\n obj = {\n key: cls.to_basic_types(value)\n for key, value in obj.items()\n }\n elif isinstance(obj, Iterable) and not isinstance(obj, (str, bytes)):\n obj = [\n cls.to_basic_types(sub_obj)\n for sub_obj in obj\n ]\n return obj\n\n @classmethod\n def from_basic_types(cls, obj):\n if isinstance(obj, Mapping):\n _obj = custom_loads(obj)\n if obj is _obj:\n obj = {\n key: cls.from_basic_types(value)\n for key, value in obj.items()\n }\n else:\n obj = _obj\n elif isinstance(obj, Iterable) and not isinstance(obj, (str, bytes)):\n obj = [\n cls.from_basic_types(sub_obj)\n for sub_obj in obj\n ]\n return obj\n\n @classmethod\n def dumps(cls, obj, debug=False):\n return str(marshal.dumps(cls.to_basic_types(obj)))\n\n @classmethod\n def loads(cls, data):\n try:\n return cls.from_basic_types(marshal.loads(data))\n except Exception as err:\n logger = multiprocessing.get_logger()\n logger.error('Corrupted data of len:\\n%s' % len(data))\n traceback.print_exc(err)\n return []\n","sub_path":"snippets/pyaloha/protocol/basic_marshal.py","file_name":"basic_marshal.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470116761","text":"#!/bin/bash/python\n#coding=utf-8\nimport Image\n\nold = Image.open('mozart.gif')\ns = old.size\ndata = list(old.getdata())\nnew = Image.new(old.mode,s)\nnew.putpalette(old.palette)\n\nreceive = new.load()\nfor i in range(s[1]):\n for j in range(s[0]):\n if data[i*s[0]+j] == 195:\n for pix,x in zip(data[i*s[0]+j:s[0]*(i+1)]+data[i*s[0]:i*s[0]+j-1],range(s[0])):\n receive[x,i]=pix\n break\n\nnew.save('new.gif',old.format)\n","sub_path":"16/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"548964968","text":"from moviepy.editor import VideoFileClip\nimport matplotlib.pyplot as plt\n\nvideo_path = 'test.mp4'\noutput_img = 'output.jpeg'\ntime_mark = 3.15\n\n\nclip = VideoFileClip(video_path)\nframe = clip.get_frame(time_mark)\nplt.imshow(frame)\nplt.show()\n","sub_path":"get_frame.py","file_name":"get_frame.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239683045","text":"import dataclasses\nimport logging\nfrom enum import Enum\nfrom decimal import Decimal\nfrom typing 
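`MarshalWorkerResults.to_basic_types` above lowers arbitrary objects to marshal-safe dicts, lists and scalars by recursing through `Mapping` and `Iterable` while exempting strings and bytes. A compact standalone version of that walk (the `__dumpdict__` hook name is taken from the record):

```python
from collections.abc import Iterable, Mapping

def to_basic_types(obj):
    """Recursively reduce obj to dicts, lists and plain scalars."""
    if hasattr(obj, '__dumpdict__'):
        obj = obj.__dumpdict__()
    if isinstance(obj, Mapping):
        return {k: to_basic_types(v) for k, v in obj.items()}
    # strings/bytes are iterable but must stay atomic
    if isinstance(obj, Iterable) and not isinstance(obj, (str, bytes)):
        return [to_basic_types(v) for v in obj]
    return obj

print(to_basic_types({'a': (1, 2), 'b': {'c': [3, {'d': 4}]}}))
# -> {'a': [1, 2], 'b': {'c': [3, {'d': 4}]}}
```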
import Optional, List\n\nfrom requests import Session\nfrom requests.exceptions import ConnectionError\nfrom rest_food.settings import YANDEX_API_KEY\nfrom rest_food.settings import GOOGLE_API_KEY\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass YandexBBox(Enum):\n BELARUS = '23.579,51.5~32.6,56.2'\n MINSK = '27.4,53.83~27.7,54'\n\n\nclass GoogleBounds(Enum):\n GDANSK = '54.3,18.5|54.6,18.8'\n WARSZAWA = '52,20.5|52.5,21.3'\n POLAND = '49.13,14.3|55,24.5'\n\n\n@dataclasses.dataclass\nclass GeoCoderResult:\n latitude: Decimal\n longitude: Decimal\n is_sure: bool\n\n\n_http_session = Session()\n\n\ndef _call_yandex_geocoder(address: str, bbox: YandexBBox) -> Optional[GeoCoderResult]:\n \"\"\"Deprecated.\"\"\"\n logger.info('Geocode %s for %s', address, bbox.name)\n url = (\n f'https://geocode-maps.yandex.ru/1.x/?'\n f'apikey={YANDEX_API_KEY}&'\n f'geocode={address}&'\n f'bbox={bbox.value}&'\n f'rspn=1&'\n f'format=json'\n )\n logger.debug(url)\n\n try:\n response = _http_session.get(url, timeout=5)\n except ConnectionError:\n logger.exception('Connection error while geocode.')\n return None\n\n if response.status_code != 200:\n logger.warning(\n 'Geocoder API %s status code. Content below:\\n%s',\n response.status_code,\n response.content\n )\n return None\n\n try:\n data = response.json()\n except:\n logger.warning('Non-json geocoder response. Content below:%s', response.content)\n return None\n\n try:\n results_count = int(\n data['response']['GeoObjectCollection']\n ['metaDataProperty']['GeocoderResponseMetaData']['found']\n )\n except KeyError as e:\n logger.warning(\n \"Can't get 'found' data in geocoder response. Key (%s) lost. Content below:%s\",\n e, data\n )\n return None\n except ValueError:\n logger.warning(\"Unexpected 'found' number format. Content below:%s\", data)\n return None\n\n\n if results_count == 0:\n return None\n\n try:\n coordinates_string = (\n data['response']['GeoObjectCollection']\n ['featureMember'][0]['GeoObject']['Point']['pos']\n )\n longitude, latitude = coordinates_string.split()\n latitude = Decimal(latitude)\n longitude = Decimal(longitude)\n\n except KeyError as e:\n logger.warning(\n \"Can't get 'pos' key in geocoder response. Key (%s) lost. Content below:\\n%s\",\n e, data\n )\n return None\n except (ArithmeticError, ValueError):\n logger.warning(\n \"Can't get coordinates: %s\",\n data['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']['Point']['pos']\n )\n return None\n\n return GeoCoderResult(latitude=latitude, longitude=longitude, is_sure=results_count == 1)\n\n\ndef _call_google_geocoder(address: str, bounds: GoogleBounds) -> Optional[GeoCoderResult]:\n logger.info('Geocode %s for %s by Google API.', address, bounds.name)\n url = 'https://maps.googleapis.com/maps/api/geocode/json'\n params = {\n 'address': address,\n 'key': GOOGLE_API_KEY,\n 'bounds': bounds.value,\n }\n\n logger.debug('Making geocoding call.', extra={'url': url, 'address': address, 'bounds': bounds.value})\n\n try:\n response = _http_session.get(url, params=params, timeout=5)\n except ConnectionError:\n logger.exception('Connection error while geocode.')\n return None\n\n if response.status_code != 200:\n logger.warning(\n 'Geocoder API %s status code. Content below:\\n%s',\n response.status_code,\n response.content\n )\n return None\n\n try:\n data = response.json()\n except:\n logger.warning('Non-json geocoder response. 
Content below:%s', response.content)\n        return None\n\n    if 'results' not in data or len(data['results']) == 0:\n        logger.info('No data found.', extra={'bounds': bounds.name})\n        return None\n\n    location = data['results'][0]['geometry']['location']\n\n    return GeoCoderResult(\n        latitude=Decimal(location['lat']), longitude=Decimal(location['lng']), is_sure=len(data['results']) == 1,\n    )\n\n\ndef geocode(address: str) -> Optional[GeoCoderResult]:\n    last_retrieved = None\n\n    for bounds in GoogleBounds:\n        geocoding_data = _call_google_geocoder(address, bounds)\n        if geocoding_data is not None:\n            last_retrieved = geocoding_data\n            if geocoding_data.is_sure:\n                break\n\n    return last_retrieved\n\n\ndef get_coordinates(address: str) -> Optional[List[Decimal]]:\n    geocoded = geocode(address)\n    return geocoded and [geocoded.latitude, geocoded.longitude]\n","sub_path":"rest_food/common/geocoding.py","file_name":"geocoding.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"198897137","text":"\"\"\"Configure basic logger for the project.\"\"\"\n\nimport logging\nimport src.config as cfg\n\n# Create a custom logger\nlogger = logging.getLogger(__name__)\n\n# Set logging level according to configuration settings\nlog_lvl = logging.DEBUG if cfg.DEBUG_LOGGING else logging.INFO\n\n# Create handlers\nc_handler = logging.StreamHandler()\nc_handler.setLevel(log_lvl)\n\n# Create formatters and add it to handlers\nc_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\nc_handler.setFormatter(c_format)\n\n# Add handlers to the logger\nlogger.addHandler(c_handler)\nlogger.setLevel(log_lvl)\n\nlogger.debug(\"Logger set.\")\n","sub_path":"src/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"411986739","text":"from typing import Dict,Optional\nfrom dbgen.core.parsing import parse_line\n\ndef get_sigma_qe(log:str\n                 ) -> Optional[float]:\n    \"\"\"\n    Extract Fermi-Dirac smearing from QE log file\n    \"\"\"\n    # constants\n    #----------\n    ryd_to_ev = 13.60569\n\n    # main program\n    #------------\n    parsed = parse_line(log,'Fermi-Dirac smearing',0)\n    if parsed is None:\n        raise ValueError('malformed QE logfile?')\n    else:\n        raw = parsed.split()[-1]\n        return round(ryd_to_ev * float(raw),3)\n","sub_path":"catalog_model/scripts/Pure/Load/get_sigma_qe.py","file_name":"get_sigma_qe.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"445412070","text":"#!/usr/bin/python3\n\nimport argparse\nimport tarfile\nfrom lxml import etree\nimport csv\n\nparser = argparse.ArgumentParser(description='''\nExtract specific fields from the NCBI SRA Metadata XML files inside a tar archive.\n''')\nparser.add_argument(\"metadata_tar_gz\", help='NCBI SRA metadata tar.gz file '\n    '(NCBI_SRA_Metadata_Full_20XXXXXX.tar.gz)')\nparser.add_argument(\"output_tsv\", help='tsv output file the selected fields will be written to')\nparser.add_argument(\"tag\", nargs='+', help='The fields to retrieve from the XML files, '\n    'e.g.
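The `geocode` loop above scans bounding boxes in order, remembers the last non-None hit, and stops early once a result is unambiguous (`is_sure`). That fallback pattern in miniature, with invented data and names:

```python
from typing import Iterable, Optional, Tuple

def best_candidate(candidates: Iterable[Tuple[str, bool]]) -> Optional[str]:
    """Keep the last candidate seen, but stop at the first 'sure' one."""
    last = None
    for value, is_sure in candidates:
        last = value
        if is_sure:
            break
    return last

assert best_candidate([('region guess', False), ('exact hit', True),
                       ('never reached', True)]) == 'exact hit'
assert best_candidate([('only guess', False)]) == 'only guess'
assert best_candidate([]) is None
```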
\"latitude\"')\nargs = parser.parse_args()\n\ntotal_sample_count = 0\n\nwith tarfile.open(args.metadata_tar_gz, 'r') as tar, open(args.output_tsv, 'w') as tsv:\n tsv_out = csv.writer(tsv, delimiter='\\t')\n tsv_out.writerow(['sample_accession'] + args.tag)\n for item in tar:\n if item.isfile and item.name.endswith('.sample.xml'):\n root = etree.XML(tar.extractfile(item).read())\n samples = root.findall('SAMPLE')\n for sample in samples:\n total_sample_count += 1\n desired_tag_values = {key: '' for key in args.tag}\n sample_attrs = sample.findall('SAMPLE_ATTRIBUTES/SAMPLE_ATTRIBUTE')\n for sample_attr in sample_attrs:\n for tag in args.tag:\n if sample_attr is not None and sample_attr.findtext('TAG') == tag:\n if total_sample_count % 1000 == 0:\n print('Samples processed: %s' % total_sample_count)\n desired_tag_values[tag] = sample_attr.findtext('VALUE')\n if any([False if desired_tag_values[tag] == '' else True for tag in args.tag]):\n tsv_out.writerow([sample.get('accession')] + [desired_tag_values[tag] for tag in args.tag])\n print('Total amount of samples processed: %s' % total_sample_count)\n","sub_path":"sra-sample-metadata-parser/0.1/sra-sample-metadata-parser.py","file_name":"sra-sample-metadata-parser.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"649999083","text":"import sys\n\nvcf_file_path = sys.argv[1]\ndict_file_path = sys.argv[2]\nout_vcf_file_path = sys.argv[3]\n\nwith open(dict_file_path) as dict_file:\n sequences = set()\n for line in dict_file:\n if not \"SN:\" in line:\n continue\n\n line_items = line.rstrip(\"\\n\").split(\"\\t\")\n sequences.add(line_items[1].replace(\"SN:\", \"\"))\n\nwith open(vcf_file_path) as vcf_file:\n with open(out_vcf_file_path, 'w') as out_vcf_file:\n for line in vcf_file:\n if line.startswith(\"#\"):\n if not line.startswith(\"##contig=\"):\n out_vcf_file.write(line)\n continue\n\n if line.split(\"\\t\")[0] in sequences:\n out_vcf_file.write(line)\n","sub_path":"src/helper/reconcile_vcf_with_dict.py","file_name":"reconcile_vcf_with_dict.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"496448762","text":"from __future__ import division, print_function, absolute_import\nimport tflearn\nimport speech_data\nimport tensorflow as tf\n\n\n# hdf5 is not supported on this machine (please install/reinstall h5py for optimal experience)\n# curses is not supported on this machine (please install/reinstall curses for an optimal experience)\n\n#learning rate. 
The higher the learning rate the faster the network trains.\n# the slower the learning rate, the slower the network is trained but it is more accurate\nlearning_rate = 0.0001\ntraining_iters = 300000 # steps we want to train for\nbatch_size = 64\n\nwidth = 20 # mfcc features\nheight = 80 # (max) length of utterance\nclasses = 10 # digits (The number of digits that we are training 1- 9)\n\nbatch = word_batch = speech_data.mfcc_batch_generator(batch_size) #downloads the .wav files that have a recording of different spoken numbers\nX, Y = next(batch) #labeled speach files\ntrainX, trainY = X, Y\ntestX, testY = X, Y #overfit for now\n\n# uses Recurrent Neural Network (RNN)\n#tensor is a multi dimensional array of data\n# Network building\n\n#width is the number of features abstracted from the utterances from our speech helper class\n#height is the max length of each utterance\nnet = tflearn.input_data([None, width, height])\n\n#128 is the number of neurons\n#dropout helps prevents overfitting by turning of neurons during training. This allows for a more generalized model\nnet = tflearn.lstm(net, 128, dropout=0.8) #LSTM is a network that remembers everything it has learned. USed for state of the art speech recognition\n#softmax converts the numerical data into numerical probabilities\nnet = tflearn.fully_connected(net, classes, activation='softmax')\n#regression will output a single predictive number for our utterance\nnet = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')\n# Training\n\n### add this \"fix\" for tensorflow version errors\ncol = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\nfor x in col:\n tf.add_to_collection(tf.GraphKeys.VARIABLES, x ) \n\n\nmodel = tflearn.DNN(net, tensorboard_verbose=0)\nwhile 1: #training_iters\n model.fit(trainX, trainY, n_epoch=10, validation_set=(testX, testY), show_metric=True,\n batch_size=batch_size)\n _y=model.predict(X)\nmodel.save(\"tflearn.lstm.model\")\nprint (_y)\nprint (y)\n","sub_path":"Speech Recognition/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"49533184","text":"\n# A dictionary of options for dynamic stein variational methods\noptions = {\n # 1. options for the parameter type\n \"type_parameter\": \"field\",\n # field: the parameter is a spatial field\n # vector: the parameter is a vector of finite dimensions\n \"is_projection\": False,\n # if True, the parameter is the coefficient of a high-dimensional parameter projected into a subspace\n \"type_projection\": \"hessian\",\n # hessian: Hessian-based projection E[\\nabla^2 \\log \\pi]\n # fisher: Fisher-based projection E[\\nabla \\log pi (\\nabla \\log \\pi)^T]\n \"is_precondition\": False,\n # True: use Fisher or Hessian matrix to precondition SVGD\n \"tol_projection\": 1.e-1,\n # the tolerance for dimension truncation/projection, i.e., |\\lambda_{j+1}| <= tol <= |\\lambda_j|\n \"reduce_tol_projection\": 2,\n # reduce the projection tolerance by a factor of 10\n \"coefficient_dimension\": 10,\n # the number of the bases for projection, or the dimension of the projection coefficient vector\n \"add_dimension\": 2,\n # the # of dimensions to add when the training converges in the projected subspace\n\n # 2. 
options for initializing and dynamically adding particles in the optimization process\n \"seed\": 'random',\n # random: generate particles with random seed\n # fixed: generate particles with fixed seed\n \"number_particles\": 1,\n # number of initial particles used to construct the transport map\n \"number_particles_add\": 0,\n # number of additional particles not used in constructing the transport map, but used\n # e.g., to compute the normalization constant as independent samples pushed to posterior\n \"add_number\": 0,\n # if 0, do not add particles\n # the number of particles to be added sampled from Laplace distribution at each particle, 1, 2, 3, ...\n \"add_step\": 10,\n # add particles every n steps of optimization, n = add_step,\n # this should be changed to more suitable criteria\n \"add_rule\": 1,\n # if 1, add particles and use all of them to construct the transport map\n # if 2, add particles but only add the ones added in the previous step to construct the transport map\n # if 3, add particles but do NOT use them to construct the transport map\n\n # 3. options for the optimization method\n \"type_optimization\": \"newtonSeparated\",\n # newtonSeparated: Stein variational Newton with separated system\n # newtonCoupled: Stein variational Newton with coupled system\n # gradientDescent: Stein variational gradient decent\n \"type_Hessian\": \"lumped\",\n # \"full\": assemble the fully coupled Hessian system, only work for NewtonCoupled\n # \"lumped\": add the Hessian in the same row to the diagonal one, work for both NewtonCoupled and NewtonSeparated\n # \"diagonal\": only use the diagonal part of the Hessian system, work for both NewtonCoupled and NewtonSeparated\n\n # 4. options for the Hessian misfit term\n \"type_approximation\": \"fisher\",\n # hessian: use Hessian\n # fisher: use Fisher information matrix as approximation of the local and averaged Hessian\n \"randomized_eigensolver\": True,\n # True: use randomized algorithm to solve the eigenvalue problem, otherwise, use scipy.linalg.eigh\n \"low_rank_Hessian\": False,\n # True: solve the generalized eigenvalue problem (H, R), R is the inverse of prior covariance\n \"rank_Hessian\": 20,\n # the rank of the Hessian of the misfit term at each particle\n \"rank_Hessian_tol\": 1.e-1,\n # the tolerance to determine the # of ranks such that H \\psi_i = \\lambda_i R \\psi_i, \\lambda_i >= rank_Hessian_tol\n \"low_rank_Hessian_average\": True,\n # True: solve the generalized eigenvalue problem (H, R), R is the inverse of prior covariance\n \"rank_Hessian_average\": 20,\n # the rank of the Hessian of the misfit term averaged over all particles\n \"rank_Hessian_average_tol\": 1.e-1,\n # the tolerance to determine the # of ranks such that H \\psi_i = \\lambda_i R \\psi_i, \\lambda_i >= rank_Hessian_tol\n \"gauss_newton_approx\": False,\n # if True, use Gauss Newton approximation of the Hessian of the misfit term, to make the system well-posed to solve\n \"max_iter_gauss_newton_approx\": 10,\n # the maximum number n of optimization steps to use gauss_newton_approx\n # if not 0, \"gauss_newton_approx\" is automatically set to True before reaching n\n\n # 5. 
options for the kernel\n \"type_metric\": \"posterior_separate\",\n # \"prior\": the prior covariance\n # \"posterior_average\": the average of the posterior covariance at all current particles;\n # \"posterior_separate\": the posterior covariance at each of the current particle;\n \"type_scaling\": 1,\n # 0: no scaling\n # 1: scale the metric by the parameter dimension\n # 2: scale the metric by the trace of the negative log posterior\n # 3: scale the metric by the mean of the particle distances\n # 4: scale the metric by the mean of the balanced posterior gradient and kernel gradient\n \"kernel_vectorized\": False,\n # if True, compute kernel using vectorized structure\n \"save_kernel\": True,\n # if True, save the evaluations of the kernel and its gradient at every particle\n # set False for large numbers of both parameter dimension and particles, due to prohibitive memory usage\n \"delta_kernel\": False,\n # if True, set k(pn, pm) = 0 for pn != pm, and k(pn, pn) = 1, in order to accelerate moving particles to posterior\n \"max_iter_delta_kernel\": 0,\n # the maximum number n of optimization steps to use delta kernel\n # if not 0, \"delta_kernel\" is automatically set to True before reaching n\n\n # 6. options for saving data and stopping optimization\n \"plot\": False,\n # plot figures during construction\n \"update_step\": 10,\n # update projection basis every n steps\n \"save_step\": 10,\n # save data every n steps\n \"save_number\": 0,\n # the number of particles, kernels, eigenvalues to be generated and saved, if 0, do not save\n \"rel_tolerance\": 1e-6,\n # stop when sqrt(g,g)/sqrt(g_0,g_0) <= rel_tolerance\n \"abs_tolerance\": 1e-12,\n # stop when sqrt(g,g) <= abs_tolerance\n \"gdm_tolerance\": 1e-18,\n # stop when (g,dm) <= gdm_tolerance\n \"step_tolerance\": 1e-3,\n # stop when (dm, dm) * step_size <= 1e-3\n \"step_projection_tolerance\": 1e-2,\n # update projected bases when (dm, dm) * step_size <= 1e-3, which should be larger than step_tolerance\n \"reduce_step_projection_tolerance\": 2,\n # reduce the step_projection_tolerance by a factor of 2\n \"inner_rel_tolerance\": 1e-9,\n # relative tolerance used for the solution of the forward, adjoint, and incremental (fwd,adj) problems\n \"max_iter\": 100,\n # maximum number of iterations for the optimization\n \"cg_coarse_tolerance\": .5e-2,\n # Coarsest tolerance for the CG method (Eisenstat-Walker)\n \"line_search\": True,\n # do line search if True\n \"search_size\": 1.,\n # step size to start the line search\n \"c_armijo\": 1e-4,\n # Armijo constant for sufficient reduction\n \"max_backtracking_iter\": 10,\n # Maximum number of backtracking iterations\n \"print_level\": -1,\n # Control verbosity of printing screen\n \"termination_reasons\": [\n \"Maximum number of Iteration reached\", # 0\n \"Norm of the gradient less than tolerance\", # 1\n \"Maximum number of backtracking reached\", # 2\n \"Norm of (g, dm) less than tolerance\", # 3\n \"Norm of alpha * dm less than tolerance\" # 4\n ]\n # the reasons for terminating the optimization\n}","sub_path":"nonPDE_Models/stein/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347087213","text":"import uuid\n\nfrom app.notifications.process_client_response import (\n validate_callback_data,\n process_sms_client_response\n)\n\n\ndef test_validate_callback_data_returns_none_when_valid():\n form = {'status': 'good',\n 'reference': 'send-sms-code'}\n fields = 
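Several tolerances in the options above (`tol_projection`, `rank_Hessian_tol`) truncate a spectrum at the first eigenvalue whose magnitude drops below the tolerance, i.e. keep the modes with |lambda_j| >= tol. A small numpy sketch of that truncation rule, with invented values:

```python
import numpy as np

def truncated_rank(eigenvalues, tol):
    """Number of modes kept under the rule |lambda_j| >= tol.

    Assumes eigenvalues are sorted by decreasing magnitude, as an
    eigensolver for the projection basis would return them.
    """
    magnitudes = np.abs(np.asarray(eigenvalues, dtype=float))
    return int(np.count_nonzero(magnitudes >= tol))

spectrum = [9.1, 3.4, 0.8, 0.09, 0.004]
print(truncated_rank(spectrum, tol=1e-1))   # -> 3 modes retained
```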
['status', 'reference']\n client_name = 'sms client'\n\n assert validate_callback_data(form, fields, client_name) is None\n\n\ndef test_validate_callback_data_return_errors_when_fields_are_empty():\n form = {'monkey': 'good'}\n fields = ['status', 'cid']\n client_name = 'sms client'\n\n errors = validate_callback_data(form, fields, client_name)\n assert len(errors) == 2\n assert \"{} callback failed: {} missing\".format(client_name, 'status') in errors\n assert \"{} callback failed: {} missing\".format(client_name, 'cid') in errors\n\n\ndef test_validate_callback_data_can_handle_integers():\n form = {'status': 00, 'cid': 'fsdfadfsdfas'}\n fields = ['status', 'cid']\n client_name = 'sms client'\n\n result = validate_callback_data(form, fields, client_name)\n assert result is None\n\n\ndef test_validate_callback_data_returns_error_for_empty_string():\n form = {'status': '', 'cid': 'fsdfadfsdfas'}\n fields = ['status', 'cid']\n client_name = 'sms client'\n\n result = validate_callback_data(form, fields, client_name)\n assert result is not None\n assert \"{} callback failed: {} missing\".format(client_name, 'status') in result\n\n\ndef test_outcome_statistics_called_for_successful_callback(sample_notification, mocker):\n stats_mock = mocker.patch('app.notifications.process_client_response.create_outcome_notification_statistic_tasks')\n mocker.patch(\n 'app.notifications.process_client_response.notifications_dao.update_notification_status_by_id',\n return_value=sample_notification\n )\n\n reference = str(uuid.uuid4())\n\n success, error = process_sms_client_response(status='3', reference=reference, client_name='MMG')\n assert success == \"MMG callback succeeded. reference {} updated\".format(str(reference))\n assert error is None\n stats_mock.assert_called_once_with(sample_notification)\n\n\ndef test_process_sms_response_return_success_for_send_sms_code_reference(mocker):\n stats_mock = mocker.patch('app.notifications.process_client_response.create_outcome_notification_statistic_tasks')\n\n success, error = process_sms_client_response(status='000', reference='send-sms-code', client_name='sms-client')\n assert success == \"{} callback succeeded: send-sms-code\".format('sms-client')\n assert error is None\n stats_mock.assert_not_called()\n\n\ndef test_process_sms_response_returns_error_bad_reference(mocker):\n stats_mock = mocker.patch('app.notifications.process_client_response.create_outcome_notification_statistic_tasks')\n\n success, error = process_sms_client_response(status='000', reference='something-bad', client_name='sms-client')\n assert success is None\n assert error == \"{} callback with invalid reference {}\".format('sms-client', 'something-bad')\n stats_mock.assert_not_called()\n\n\ndef test_process_sms_response_returns_error_for_unknown_sms_client(mocker):\n stats_mock = mocker.patch('app.notifications.process_client_response.create_outcome_notification_statistic_tasks')\n success, error = process_sms_client_response(status='000', reference=str(uuid.uuid4()), client_name='sms-client')\n\n assert success is None\n assert error == 'unknown sms client: {}'.format('sms-client')\n stats_mock.assert_not_called()\n\n\ndef test_process_sms_response_returns_error_for_unknown_status(mocker):\n stats_mock = mocker.patch('app.notifications.process_client_response.create_outcome_notification_statistic_tasks')\n\n success, error = process_sms_client_response(status='000', reference=str(uuid.uuid4()), client_name='Firetext')\n assert success is None\n assert error == \"{} callback failed: status {} 
not found.\".format('Firetext', '000')\n stats_mock.assert_not_called()\n","sub_path":"tests/app/notifications/test_process_client_response.py","file_name":"test_process_client_response.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544578935","text":"\"\"\"\nCopyright (C) 2021 SE Slash - All Rights Reserved\nYou may use, distribute and modify this code under the\nterms of the MIT license.\nYou should have received a copy of the MIT license with\nthis file. If not, please write to: secheaper@gmail.com\n\"\"\"\n\n\"\"\"\nThe formatter module focuses on processing raw text and returning it in \nthe required format. \n\"\"\"\n\nfrom datetime import datetime\nimport math\n\ndef formatResult(website, titles, prices, links,ratings,df_flag, currency):\n \"\"\"\n The formatResult function takes the scraped HTML as input, and extracts the \n necessary values from the HTML code. Ex. extracting a price '$19.99' from\n a paragraph tag.\n Parameters: titles- scraped titles of the products, prices- scraped prices of the products, \n links- scraped links of the products on the respective e-commerce sites, \n ratings-scraped ratings of the product\n Returns: A dictionary of all the parameters stated above for the product\n \"\"\"\n\n title, price, link, rating, converted_cur = '', '', '', '', ''\n if titles: title = titles[0].get_text().strip()\n if prices: price = prices[0].get_text().strip()\n if '$' not in price:\n price='$'+price\n if links: link = links[0]['href']\n if ratings: rating = float(ratings[0].get_text().strip().split()[0])\n #if df_flag==0: title=formatTitle(title)\n #if df_flag==0: link=formatTitle(link)\n if currency: converted_cur = getCurrency(currency, price)\n product = {\n 'timestamp': datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\n \"title\": title,\n \"price\": price, \n \"link\":f'www.{website}.com{link}', \n \"website\": website,\n \"rating\" : rating,\n \"converted price\": converted_cur\n }\n \n return product\n\n\ndef sortList(arr, sortBy, reverse):\n \"\"\" It sorts the products list based on the flags provided as arguements. Currently, it supports sorting by price.\n Parameters- SortBy- \"pr\": sorts by price, SortBy- \"ra\": sorts by rating\n Returns- Sorted list of the products based on the parameter requested by the user\n \"\"\"\n if sortBy == \"pr\":\n return sorted(arr, key=lambda x: getNumbers(x[\"price\"]), reverse=reverse)\n # To-do: sort by rating\n elif sortBy == \"ra\":\n return sorted(arr, key=lambda x: getNumbers(x[\"rating\"]), reverse=reverse)\n pass\n return arr\n\ndef formatSearchQuery(query):\n \"\"\" It formats the search string into a string that can be sent as a url paramenter.\n \"\"\"\n return query.replace(\" \", \"+\")\n\ndef formatTitle(title):\n \"\"\" It formats titles extracted from the scraped HTML code.\n \"\"\"\n if(len(title) > 40):\n return title[:40] + \"...\"\n return title\n\n\ndef getNumbers(st):\n \"\"\" It extracts float values for the price from a string.\n Ex. it extracts 10.99 from '$10.99' or 'starting at $10.99'\n \"\"\"\n ans = ''\n for ch in st:\n if (ch >= '0' and ch <= '9') or ch == '.':\n ans += ch\n try:\n ans = float(ans)\n except:\n ans = math.inf\n return ans\n\ndef getCurrency(currency, price):\n \"\"\"\n The getCurrency function converts the prices listed in USD to user specified currency. 
\n    Currently it supports INR, EURO, AUD, YUAN, YEN, POUND\n    \"\"\"\n\n    converted_cur = 0.0\n    if len(price)>1 :\n        if currency == \"inr\":\n            converted_cur = 75 * int(price[(price.index(\"$\")+1):price.index(\".\")].replace(\",\",\"\"))\n        elif currency == \"euro\":\n            converted_cur = 1.16 * int(price[(price.index(\"$\")+1):price.index(\".\")].replace(\",\",\"\"))\n        elif currency == \"aud\":\n            converted_cur = 1.34 * int(price[(price.index(\"$\")+1):price.index(\".\")].replace(\",\",\"\"))\n        elif currency == \"yuan\":\n            converted_cur = 6.40 * int(price[(price.index(\"$\")+1):price.index(\".\")].replace(\",\",\"\"))\n        elif currency == \"yen\":\n            converted_cur = 114.21 * int(price[(price.index(\"$\")+1):price.index(\".\")].replace(\",\",\"\"))\n        elif currency == \"pound\":\n            converted_cur = 0.74 * int(price[(price.index(\"$\")+1):price.index(\".\")].replace(\",\",\"\"))\n        converted_cur=currency.upper()+' '+str(converted_cur)\n    return converted_cur\n","sub_path":"src/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"278225294","text":"# -*- coding: utf-8 -*- #\n# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Function for executing the tasks contained in a Task Iterator.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.command_lib.storage import optimize_parameters_util\nfrom googlecloudsdk.command_lib.storage import plurality_checkable_iterator\nfrom googlecloudsdk.command_lib.storage.tasks import task_graph_executor\nfrom googlecloudsdk.command_lib.storage.tasks import task_status\nfrom googlecloudsdk.core import properties\n\n\ndef _execute_tasks_sequential(task_iterator,\n                              received_messages=None,\n                              task_status_queue=None):\n  \"\"\"Executes task objects sequentially.\n\n  Args:\n    task_iterator (Iterable[task.Task]): An iterator for task objects.\n    received_messages (Iterable[task.Message]): Messages sent to each\n      task in task_iterator.\n    task_status_queue (multiprocessing.Queue|None): Used by task to report its\n      progress to a central location.\n\n  Returns:\n    Iterable[task.Message] emitted by tasks in task_iterator.\n  \"\"\"\n  messages_from_current_task_iterator = []\n  for task in task_iterator:\n    if received_messages is not None:\n      task.received_messages = received_messages\n    task_output = task.execute(task_status_queue=task_status_queue)\n\n    if task_output is None:\n      continue\n\n    if task_output.messages is not None:\n      messages_from_current_task_iterator.extend(task_output.messages)\n\n    if task_output.additional_task_iterators is not None:\n      messages_for_dependent_tasks = []\n      for additional_task_iterator in task_output.additional_task_iterators:\n        messages_for_dependent_tasks = _execute_tasks_sequential(\n            additional_task_iterator,\n            messages_for_dependent_tasks,\n            task_status_queue=task_status_queue)\n\n  
return messages_from_current_task_iterator\n\n\ndef should_use_parallelism():\n  \"\"\"Checks execution settings to determine if parallelism should be used.\n\n  This function is called in some tasks to determine how they are being\n  executed, and should include as many of the relevant conditions as possible.\n\n  Returns:\n    True if parallel execution should be used, False otherwise.\n  \"\"\"\n  process_count = properties.VALUES.storage.process_count.GetInt()\n  thread_count = properties.VALUES.storage.thread_count.GetInt()\n  return process_count > 1 or thread_count > 1\n\n\ndef execute_tasks(task_iterator,\n                  parallelizable=False,\n                  task_status_queue=None,\n                  progress_type=None):\n  \"\"\"Call appropriate executor.\n\n  Args:\n    task_iterator: An iterator for task objects.\n    parallelizable (boolean): Should tasks be executed in parallel.\n    task_status_queue (multiprocessing.Queue|None): Used by task to report its\n      progress to a central location.\n    progress_type (task_status.ProgressType|None): Determines what type of\n      progress indicator to display.\n\n  Returns:\n    An integer indicating the exit_code. Zero indicates no fatal errors were\n    raised.\n  \"\"\"\n  plurality_checkable_task_iterator = (\n      plurality_checkable_iterator.PluralityCheckableIterator(task_iterator))\n  optimize_parameters_util.detect_and_set_best_config(\n      is_estimated_multi_file_workload=(\n          plurality_checkable_task_iterator.is_plural()))\n\n  # Some tasks operate under the assumption that they will only be executed when\n  # parallelizable is True, and use should_use_parallelism to determine how they\n  # are executed.\n  if parallelizable and should_use_parallelism():\n    exit_code = task_graph_executor.TaskGraphExecutor(\n        plurality_checkable_task_iterator,\n        max_process_count=properties.VALUES.storage.process_count.GetInt(),\n        thread_count=properties.VALUES.storage.thread_count.GetInt(),\n        task_status_queue=task_status_queue,\n        progress_type=progress_type).run()\n  else:\n    with task_status.progress_manager(task_status_queue, progress_type):\n      _execute_tasks_sequential(\n          plurality_checkable_task_iterator,\n          task_status_queue=task_status_queue)\n      # TODO(b/188092601) Determine the exit_code in _execute_tasks_sequential.\n      exit_code = 0\n  return exit_code\n","sub_path":"google-cloud-sdk/lib/googlecloudsdk/command_lib/storage/tasks/task_executor.py","file_name":"task_executor.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"213967240","text":"import numpy as np\nfrom cartesianSettings import *\n\n\n\n\n\nclass Node:\n\n    def __init__(self):\n        self.func = None\n        self.inputs = [[]]\n        self.weights = []\n\n\nclass CreatePop(object):\n\n    def __init__(self, functionset, NumberColumns, NumberRows, InputsBack):\n        self.node = [[] for i in range(NumberRows)]\n        self.outputs = [[] for i in range(NumberOutputs)]\n        self.fitness = None\n        for i in range(NumberColumns):\n            for j in range(NumberRows):\n                self.node[j].append(self.createnode(i, NumberRows, functionset, InputsBack))\n        for k in range(5):\n            in_col = np.random.randint(max(0, NumberColumns - InputsBack), NumberColumns)\n            self.outputs[k].append(in_col)\n            print(self.outputs)\n            in_row = np.random.choice(NumberRows)\n            self.outputs[k].append(in_row)\n            print(self.outputs)\n\n    def createnode(self, i, NumberRows, functionset, InputsBack):\n        node = Node()\n        node.func = np.random.choice(functionset)\n        # print(node.func.arity)\n        node.inputs = [[] for z in range(node.func.arity)]\n        for k in 
range(node.func.arity):\n            in_col = np.random.randint(max(0, i - InputsBack), max(1, i))\n            node.inputs[k].append(in_col)\n            weight = (weightRange - (-weightRange)) * np.random.random_sample() + (-weightRange)\n            node.weights.append(weight)\n            if in_col == 0: # game info = 0 remember to use -1 later\n                in_row = np.random.choice(NumberInputs)\n            else:\n                in_row = np.random.choice(NumberRows)\n            node.inputs[k].append(in_row)\n\n        return node\n","sub_path":"EvoManGP/evoman/cartesian.py","file_name":"cartesian.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"560344623","text":"from ase import Atoms\nfrom ase.io import read\ndef myread(filename, format=None):\n    if format == 'GAUSSIAN_OUT':\n        import cclib\n        parser = cclib.io.ccopen(filename)\n        data = parser.parse()\n        atoms = Atoms(numbers = data.atomnos, positions = data.atomcoords[-1])\n    else:\n        atoms = read(filename=filename,format=format)\n    if '.xsd' in filename:\n        atoms.wrap()\n        atoms.center()\n    return atoms\n","sub_path":"io/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97636742","text":"\n\n\n\n\n\n\n\n'''stmt=input(\"enter a statement\")\nword=input(\"enter a word\")\nif word in stmt:\n\tn=input(\"choose option:\")\n\tn=int(n)\n\tif n==1:\n \t\tnew= input(\"enter a new word:\")\n \t\tprint(stmt.replace(word, new))\n\telif n==2:\n \t\tprint(stmt.index(word))\n\telse:\n \t\tprint(\"not chosen correct value\")\nelse:\n \tprint(\"%s is not in statement\"%(word))\n'''\n\n'''a=input(\"enter a character\")\nb= input(\"enter another character\")\nc= ord(a)\nd=ord(b)\nif c2: \n\t\tprint(\"no of factors\" + str(c))\n\t\tprint(str(n)+ \"is composite\")\n\telse:\n\t\tprint(str(n)+ \"is prime\")'''\n\n'''n=8\nfor j in range(1,n+1):\n\tfor i in range(1,j+1):\n\t\tprint(i,end=\"\")\n\t\tprint()'''\n'''n=5\nfor i in range(1,n+1):\n\tprint(n,'x',i,'=',n*i)'''\n\n'''stmt =\"python is an easy programming language\"\nchar=\"n\"\nfor i in range(len(stmt)):\n\tif stmt[i] == char:\n\t\tprint(i)'''\n\n'''stmt=\"python is an easy programming language\"\nvowels=\"aeiou\"\nfor i in stmt:\n\tif i in vowels:\n\t\tprint(i)'''\n\nn=5\nfor j in range(1,n+1):\n\tfor i in range(ord(\"a\"),ord(\"a\")+j):\n\t\tprint(chr(i),end=\"\")\n\tprint()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"geetha02.py","file_name":"geetha02.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"9411376","text":"#!/usr/bin/python\n\nfrom time import time\nimport subprocess, os\n\ndef test():\n\tt0=time()\n\tp = subprocess.Popen((\"../../src/crypto_hybrid.exe\", \"e\", \"../../src/orig.txt\", \"../../src/encrypted.txt\")); \n\tp.wait();\n\treturn time()-t0\n\nos.system(\"echo %s\" % ((str(test()))))\n","sub_path":"src/tests/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"222314991","text":"import logging\n\n\ndef get_extent(view):\n    sy, sx = [s for s in view if isinstance(s, slice)]\n    return (sx.start, sx.stop, sy.start, sy.stop)\n\n\ndef view_cascade(data, view):\n    \"\"\" Return a set of views progressively zoomed out of input at roughly\n    constant pixel count\n\n    :param data: Data object to view\n    :param view: Original view into data\n\n    :rtype: tuple of 
views\n    \"\"\"\n    shp = data.shape\n    v2 = list(view)\n    logging.debug(\"image shape: %s, view: %s\", shp, view)\n\n    #choose stride length that roughly samples entire image\n    #at roughly the same pixel count\n    step = max(shp[i - 1] * v.step / max(v.stop - v.start, 1)\n               for i, v in enumerate(view) if isinstance(v, slice))\n    step = max(step, 1)\n\n    for i, v in enumerate(v2):\n        if not(isinstance(v, slice)):\n            continue\n        v2[i] = slice(0, shp[i - 1], step)\n\n    return tuple(v2), view\n","sub_path":"glue/clients/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"38045236","text":"#script, filename, output = argv\n\n#inFile = open(filename, \"r\")\n\ns_Square = raw_input()#inFile.readline()\nt_Square = raw_input()#inFile.readline()\n\n#inFile.close()\n\nx_change = ord(t_Square[0]) - ord(s_Square[0])\ny_change = int(t_Square[1]) - int(s_Square[1])\n\ntotal_moves = 0\ncstr = \"\"\n\nwhile (x_change > 0):\n    x_change-=1\n    cstr+=\"R\"\n    if y_change > 0:\n        y_change-=1\n        cstr+=\"U\"\n    elif y_change < 0:\n        y_change+=1\n        cstr+=\"D\"\n    cstr+=\"\\n\"\n    total_moves+=1\n\nwhile (x_change < 0):\n    x_change+=1\n    cstr+=\"L\"\n    if y_change > 0:\n        y_change-=1\n        cstr+=\"U\"\n    elif y_change < 0:\n        y_change+=1\n        cstr+=\"D\"\n    cstr+=\"\\n\"\n    total_moves+=1\n\nwhile (y_change > 0):\n    y_change-=1 \n    cstr+=\"U\\n\"\n    total_moves+=1\n\nwhile (y_change < 0):\n    y_change+=1\n    cstr+=\"D\\n\"\n    total_moves+=1\n\nfinal_string = str(total_moves) + \"\\n\" + cstr\n\n#outFile = open(output, \"w\")\n#outFile.write(final_string)\n#outFile.close()\nprint(final_string)\n","sub_path":"MAC/King-Shortest-Path/KingMe.py","file_name":"KingMe.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"527564213","text":"from pybring import BringApi\nimport pandas as pd\nimport numpy as np\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# read google sheet\n# tutorial: https://towardsdatascience.com/how-to-integrate-google-sheets-and-jupyter-notebooks-c469309aacea\nspreadsheet_key = r'1aTqsmCbfN8moxBcSwIp1BMXEQDIKRTG9CXWDik51ZZI'\ncredentials_file = r'/app//credentials/bringapi-92345de4ed45.json'\nscope = ['https://spreadsheets.google.com/feeds']\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(credentials_file, scope)\ngc = gspread.authorize(credentials)\n\nbook = gc.open_by_key(spreadsheet_key)\nworksheet = book.worksheet(\"Bring!\")\ntable = worksheet.get_all_values()\ntable = np.array(table)\n\n# create DataFrame\ndf = pd.DataFrame(data={'item': table[:, 0], 'quantity': table[:, 1]})\ndf = df[df['item'] != ''].copy()\ndf['quantity'] = df['quantity'].astype(float)\n\ndf['unit'] = df['item'].str.extract(r'(\\(.+\\))')\ndf['unit'] = df['unit'].str[1:-1].values\n\nn_i = []\nfor i, u in zip(df['item'].tolist(), df['unit'].tolist()):\n    n_i.append(i.replace(' ({unit})'.format(unit=u), ''))\ndf['item'] = n_i\n\nreduced_df = df[df['quantity'] != 0]\n\n# Connect to bring\nb = BringApi(uuid='mike.claure@gmail.com', bringuuid='bringMike2021', use_login=True)\n\nfor index in range(len(reduced_df)):\n    item_i = reduced_df.values[index][0]\n    quantity_i = reduced_df.values[index][1]\n    unit_i = reduced_df.values[index][2]\n    \n    b.purchase_item(item=item_i, specification='{} {}'.format(quantity_i, 
unit_i))\n\n\n\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"644251506","text":"from functools import lru_cache\nimport logging\nimport datetime\n\nfrom abc import abstractmethod\n\nfrom RestrictedPython import (\n safe_builtins,\n utility_builtins,\n limited_builtins,\n compile_restricted\n)\nfrom RestrictedPython.Eval import default_guarded_getitem\nfrom RestrictedPython.Guards import (\n full_write_guard,\n guarded_iter_unpack_sequence,\n guarded_unpack_sequence\n)\n\n\nextra_builtins = {\n 'datetime': datetime,\n 'sorted': sorted,\n 'min': min,\n 'max': max,\n 'sum': sum,\n 'any': any,\n 'all': all,\n}\n\nbuiltins = safe_builtins.copy()\nbuiltins.update(utility_builtins)\nbuiltins.update(limited_builtins)\nbuiltins.update(extra_builtins)\n\n\ndef safer_getattr(object, name, default=None, getattr=getattr):\n '''Getattr implementation which prevents using format on string objects.\n\n format() is considered harmful:\n http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/\n\n '''\n if name == 'format' and isinstance(object, str):\n raise NotImplementedError(\n 'Using format() on a %s is not safe.' % object.__class__.__name__)\n if name[0] == '_':\n raise AttributeError(\n '\"{name}\" is an invalid attribute name because it '\n 'starts with \"_\"'.format(name=name)\n )\n val = getattr(object, name, default)\n return val\n\n\nrestricted_globals = {\n \"__builtins__\": builtins,\n \"_getattr_\": safer_getattr,\n \"_write_\": full_write_guard,\n \"_getiter_\": iter,\n \"_getitem_\": default_guarded_getitem,\n \"_iter_unpack_sequence_\": guarded_iter_unpack_sequence,\n \"_unpack_sequence_\": guarded_unpack_sequence\n}\n\nlogger = logging.getLogger(__name__)\n\n\n# CODE_FORMAT = \"\"\"{expr})\"\"\"\n\n\nclass Query:\n def __init__(\n self,\n expr: str\n ):\n self.expr = expr\n\n @abstractmethod\n def match(\n self,\n run,\n metric=None\n ) -> bool:\n ...\n\n def __call__(\n self,\n run,\n metric=None\n ):\n return self.match(run=run,\n metric=metric)\n\n\n@lru_cache(maxsize=100)\ndef compile_checker(expr):\n source_code = expr\n byte_code = compile_restricted(source_code,\n filename='',\n mode='eval')\n return byte_code\n\n\ndef syntax_error_check(expr):\n if not expr:\n return\n expr = strip_query(expr)\n try:\n compile_restricted(expr,\n filename='',\n mode='eval')\n except SyntaxError:\n compile(expr, filename='', mode='eval')\n\n\n@lru_cache(maxsize=100)\ndef strip_query(query: str) -> str:\n import re\n stripped_query = query.strip()\n # cut the hardcoded part (SELECT something IF)\n if query.lower().startswith('select'):\n try:\n stripped_query = re.split('if',\n query,\n maxsplit=1,\n flags=re.IGNORECASE)[1]\n except IndexError:\n stripped_query = ''\n\n if stripped_query:\n stripped_query = f'({stripped_query.strip()})'\n\n return stripped_query\n\n\n@lru_cache(maxsize=100)\ndef query_add_default_expr(query: str) -> str:\n default_expression = 'run.archived == False'\n # add the default expression to the query if needed\n if not query:\n return default_expression\n else:\n if 'run.archived' not in query:\n return f'{default_expression} and {query}'\n else:\n return query\n\n\nclass RestrictedPythonQuery(Query):\n def __init__(\n self,\n query: str\n ):\n stripped_query = strip_query(query)\n expr = query_add_default_expr(stripped_query)\n super().__init__(expr=expr)\n self._checker = compile_checker(expr)\n self.run_metadata_cache = 
None\n\n    def eval(\n        self,\n        run,\n        metric\n    ):\n        namespace = dict(run=run, metric=metric, **restricted_globals)\n        return eval(self._checker, restricted_globals, namespace)\n\n    def __bool__(\n        self\n    ) -> bool:\n        return bool(self.expr)\n\n    def match(\n        self,\n        run,\n        metric=None\n    ) -> bool:\n\n        # TODO enforce immutable\n        try:\n            return self.eval(run=run, metric=metric)\n        except BaseException as e:\n            logger.warning('query failed, %s', e)\n            return False\n","sub_path":"aim/storage/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385852456","text":"#Imports\nimport copy\nfrom tkinter import *\n\n#Variables\nlist1=['left']\nlist2=[]\nfinal=[]\nNangle=0\nEdges=[]\nDirection='up'\nlength=int(input(\"iteration:\"))-2\nzoom=int(input(\"zoom:\"))\n\n#Functions\ndef Add_Edges(x1,y1,x2,y2,angle):\n    Edges.append([x1,y1,x2,y2,angle])\n\ndef Center_Change(list2):\n    if list2[int(((len(list2))/2)-0.5)] == 'left':\n        list2[int(((len(list2))/2)-0.5)] = 'right'\n    elif list2[int(((len(list2))/2)-0.5)] == 'right':\n        list2[int(((len(list2))/2)-0.5)] = 'left'\n\ndef List_Gen():\n    global list1\n    global final\n    for x in range(0,length):\n        list2=copy.copy(list1)\n        Center_Change(list2)\n        list1.append('left')\n        list1=copy.copy(list1+list2)\n    final=copy.copy(list1)\n\ndef Canvas_Gen():\n    global final\n    i=0\n    \n    for x in final:\n        i=i+1\n\n        if x=='left':\n            Nangle=Edges[i-1][4]-90\n        if x=='right':\n            Nangle=Edges[i-1][4]+90\n\n        if Nangle==-90:\n            Direction=270\n        elif Nangle==360:\n            Direction=0\n        else:\n            Direction=Nangle\n        \n        px2=Edges[i-1][2]\n        py2=Edges[i-1][3]\n\n        if Direction==0:\n            x1=px2\n            y1=py2\n            x2=px2\n            y2=py2-zoom\n        if Direction==270:\n            x1=px2\n            y1=py2\n            x2=px2-zoom\n            y2=py2\n        if Direction==180:\n            x1=px2\n            y1=py2\n            x2=px2\n            y2=py2+zoom\n        if Direction==90:\n            x1=px2\n            y1=py2\n            x2=px2+zoom\n            y2=py2\n        \n        Add_Edges(x1,y1,x2,y2,Direction)\n\ndef Draw():\n    i=0\n    for x in Edges:\n        if i==0:\n            colour='red'\n        else:\n            colour='black'\n        dx1=Edges[i][0]\n        dy1=Edges[i][1]\n        dx2=Edges[i][2]\n        dy2=Edges[i][3]\n        canvas.create_line(dx1,dy1,dx2,dy2,fill=colour)\n        i=i+1 \n\n#Start\nprint(\"Starting Setup!\")\n'''grid=[[0 for x in range(0,10000)]for x in range(0,10000)]'''\nEdges=[[0 for x in range(0,5)]for x in range(0,0)]\nAdd_Edges(500,500,500+zoom,500,90)\ntk=Tk()\ncanvas=Canvas(tk, width=1000, height=1000)\ncanvas.pack()\nprint(\"Done Setup!\")\n\n#Program\nList_Gen()\nprint(\"List Generated!\")\nCanvas_Gen()\nprint(\"Canvas List Generated!\")\nDraw()\nprint(\"Done!\")\n\n","sub_path":"Fractals/Fractal3.py","file_name":"Fractal3.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"178357955","text":"# -*- coding: utf-8 -*-\nfrom bson import ObjectId\nimport models\nimport common\nglobal lock\nimport threading\nlock = threading.Lock()\nimport datetime\n#import TM_SetupProcessApproveLevel\nimport qmongo\ndef get_list_with_process_id(args):\n    try:\n        if args['data'] != None and args['data'].has_key('process_id'):\n            items = qmongo.models.TM_SetupProcessApproverSubstitute.aggregate.project(\n                process_id=1,\n                process_code=1,\n                substitute_code=1,\n                from_date=1,\n                to_date=1,\n                note=1\n            ).match(\"process_id == {0}\", args['data']['process_id'])\n            return items.get_item()\n        raise(Exception(\"process_id not found\"))\n    except Exception as ex:\n        raise(ex)\n\ndef get_list_with_searchtext(args):\n    searchText = 
args['data'].get('search', '')\n    pageSize = args['data'].get('pageSize', 0)\n    pageIndex = args['data'].get('pageIndex', 20)\n    sort = args['data'].get('sort', 20)\n\n    pageIndex = (lambda pIndex: pIndex if pIndex != None else 0)(pageIndex)\n    pageSize = (lambda pSize: pSize if pSize != None else 20)(pageSize)\n\n    items = qmongo.models.TM_SetupProcessApproverSubstitute.aggregate\n    items.left_join(qmongo.models.HCSEM_Employees, \"process_code\", \"employee_code\", \"emp\")\n    items.left_join(qmongo.models.HCSEM_Employees, \"substitute_code\", \"employee_code\", \"em\")\n    items.left_join(qmongo.models.auth_user_info, \"created_by\", \"username\", \"uc\")\n    items.left_join(qmongo.models.auth_user_info, \"modified_by\", \"username\", \"um\")\n    items.project(\n        process_id=1,\n        process_code=1,\n        substitute_code=1,\n        from_date=1,\n        to_date=1,\n        note=1,\n        created_by=\"uc.login_account\",\n        created_on=\"created_on\",\n        modified_on=\"switch(case(modified_on!='',modified_on),'')\",\n        modified_by=\"switch(case(modified_by!='',um.login_account),'')\",\n        process_name=\"concat(emp.last_name, ' ', emp.first_name)\",\n        substitute_name=\"concat(em.last_name, ' ', em.first_name)\",\n    )\n\n    items.match(\"process_id == {0}\", args['data']['process_id'])\n\n    if(searchText != None) :\n        items.match(\"contains(process_code, @name) or contains(process_name, @name) or \" + \\\n            \"contains(substitute_code, @name) or contains(substitute_name, @name) or \" + \\\n            \"contains(from_date, @name) or contains(to_date, @name)\", name=searchText)\n\n    if(sort != None):\n        items.sort(sort)\n    \n    return items.get_page(pageIndex, pageSize)\n\ndef insert(args):\n    # if args['data'] != None:\n    #     ret = models.TM_SetupProcessApproverSubstitute().insert(args['data'])\n    #     return ret\n    # return None\n    try:\n        lock.acquire()\n        ret = {}\n        if args['data'] != None:\n            data = set_dict_insert_data(args)\n            ret = qmongo.models.TM_SetupProcessApproverSubstitute.insert(data)\n            lock.release()\n            return ret\n\n        lock.release()\n        return dict(\n            error = \"request parameter does not exist\"\n        )\n    except Exception as ex:\n        lock.release()\n        raise(ex)\n\ndef save(args):\n    try:\n        lock.acquire()\n        ret = {}\n        if args['data'] != None:\n            data = set_dict_insert_data(args)\n            ret = qmongo.models.TM_SetupProcessApproverSubstitute.insert(data)\n            lock.release()\n            return ret\n        lock.release()\n        return dict(\n            error = \"request parameter does not exist\"\n        )\n    except Exception as ex:\n        lock.release()\n        raise(ex)\n\ndef update(args):\n    process_id = \"\"\n    if args['data'] != None:\n        if args['data']['process_id'] == None:\n            return None\n        else:\n            if (args['data'].has_key('process_id')):\n                process_id = args['data']['process_id']\n                args['data'].pop('process_id')\n            ret = qmongo.models.TM_SetupProcessApproverSubstitute.update(\n                args['data'],\n                \"process_id==@process_id\",\n                dict(\n                    process_id=process_id\n                ))\n            return ret\n    return None\n\ndef delete(args):\n    if args['data'] != None:\n        # ret = models.TM_SetupProcessApproverSubstitute().delete(\"process_code in {0}\", [x[\"process_code\"] for x in args['data']])\n        ret = qmongo.models.TM_SetupProcessApproverSubstitute.delete(\n            \"_id in {1}\",\n            args['data']['process_id'],\n            [ObjectId(x) for x in args['data']['_id']])\n        return ret\n    return None\n\ndef set_dict_insert_data(args):\n    data = dict()\n    data.update(\n        process_id = args['data']['process_id'],\n        process_code = (lambda x: x['process_code'] if x.has_key('process_code') else None)(args['data']),\n        substitute_code = (lambda x: x['substitute_code'] if 
x.has_key('substitute_code') else None)(args['data']),\n from_date = (lambda x: x['from_date'] if x.has_key('from_date') else None)(args['data']),\n to_date = (lambda x: x['to_date'] if x.has_key('to_date') else None)(args['data']),\n note = (lambda x: x['note'] if x.has_key('note') else None)(args['data']),\n )\n return data\n\n\n","sub_path":"apps/performance/api/TM_SetupProcessApproverSubstitute.py","file_name":"TM_SetupProcessApproverSubstitute.py","file_ext":"py","file_size_in_byte":5305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110365068","text":"# -----------------------------------------------------------------------------------------------------------------\r\n# ~~File Name: ticket_synchronizer.py\r\n# ~~Abstract: This script is responsible for updating tickets in a JIRA system based on imported data. After\r\n# the chosen fields are updated from the database, a report is created in an outside script.\r\n#\r\n# -----------------------------------------------------------------------------------------------------------------\r\nimport pyodbc\r\nimport traceback as tr\r\nimport Error\r\nimport JiraConnection\r\n\r\n\r\nclass _Singleton:\r\n \"\"\"\r\n Superclass of TicketSynchronizer. Enforces only one TicketSynchronizer being created at once.\r\n Note: Do not subclass this Singleton from other modules, as this other subclass would also overwrite the\r\n TicketSynchronizer.\r\n \"\"\"\r\n _instance = {}\r\n\r\n def __init__(self):\r\n self.__dict__ = _Singleton._instance\r\n # the above will essentially overwrite the pointer every time a new class using this method is created\r\n\r\n\r\nclass _Sql:\r\n \"\"\"\r\n This class is only to be used statically for referencing the various sql commands that are run against the\r\n database. It is implemented as a _Name class in order to discourage usage from outside this module.\r\n \"\"\"\r\n def __init__(self):\r\n pass # class is never expected to be initialized\r\n\r\n # -------------\r\n # Static Variables\r\n # -------------\r\n assignee_check = \"\"\"\r\n select distinct ext.assignee from dbo.operational_table ext left outer join dbo.cwd_user int\r\n on ext.assignee = int.user_name\r\n where project_name = ?\r\n and int.user_name is null --i.e. 
internal user does not exist\r\n \"\"\"\r\n\r\n # the sql query that will return all of the records in the operational table\r\n get_client_tickets = \"\"\"\r\n select number, assignee, state,\r\n project_name, customer_system,\r\n priority,\r\n import_date,\r\n open_date, resolve_date\r\n from dbo.operational_table\r\n where Project_Name = ?\r\n and is_incident = ?\r\n \"\"\"\r\n\r\n # the sql query that will pull the matching jira ticket based on the client ticket number\r\n # results will be jiraissuenum, username, state, open date, close date\r\n find_jira_ticket = \"\"\"\r\n select i.issuenum, u.lower_user_name, s.pname, i.created, i.resolutiondate\r\n from dbo.jiraissue i inner join dbo.customfieldvalue c\r\n on (c.issue = i.id AND c.customfield = 10107)\r\n inner join dbo.issuestatus s\r\n on (s.id = i.issuestatus)\r\n inner join dbo.cwd_user u\r\n on (u.lower_user_name = i.assignee)\r\n where LTRIM(RTRIM(c.stringvalue)) = ?\r\n and i.project = ?\r\n order by i.issuenum\r\n \"\"\"\r\n\r\n # the sql query that will update the operational table ticket to link to the internal ticket\r\n relate_client_ticket = \"\"\"\r\n update dbo.operational_table\r\n set jira_issuenum = ?\r\n where Project_Name = ?\r\n and Number = ?\r\n \"\"\"\r\n\r\n # the sql that will update the open date\r\n opened_update = \"\"\"\r\n update dbo.jiraissue\r\n set created = ?\r\n where project = ?\r\n and issuenum = ?\r\n \"\"\"\r\n\r\n # the sql that will update the resolve date\r\n closed_update = \"\"\"\r\n update dbo.jiraissue\r\n set resolutiondate = ?\r\n where project = ?\r\n and issuenum = ?\r\n \"\"\"\r\n\r\n # the sql that will update the assignee\r\n update_assignee = \"\"\"\r\n update dbo.jiraissue\r\n set assignee = ?\r\n where project = ?\r\n and issuenum = ?\r\n \"\"\"\r\n\r\n # the sql to pull the project id\r\n project_pkid = \"\"\"\r\n select project_id from project_key where project_key = ?\r\n \"\"\"\r\n\r\n\r\nclass TicketSynchronizer(_Singleton):\r\n \"\"\"\r\n Responsible for synchronizing client ticket data with JIRA data.\r\n The class _Sql is used statically to refer to all SQL queries directed at the database.\r\n Expected usage is in a with block. 
E.g., to synchronize tickets in your code, use:\r\n with TicketSynchronizer(proj, bool) as sync:\r\n sync.do()\r\n del sync # immediately free up some of those high-memory resources\r\n \"\"\"\r\n\r\n #########\r\n # Class Constants\r\n #########\r\n SCRIPT_WIDE_ERROR_MSG = 'Error in TicketSync.py: '\r\n debug = False\r\n\r\n ##########\r\n # Methods\r\n ##########\r\n\r\n def __init__(self, project_code, is_incident):\r\n \"\"\"\r\n Initialize the TicketSynchronizer object with the project_code and is_incident values.\r\n Initializes the object variables that will be used throughout the script, as well as obtaining the Loggers.\r\n :param project_code: This is a character identifier of the project being synchronized.\r\n :param is_incident: This is a boolean value that is used to filter the tickets to sync.\r\n \"\"\"\r\n _Singleton.__init__(self)\r\n self.project_code = project_code\r\n self.is_incident = is_incident\r\n\r\n # Instantiate loggers\r\n self.error_object = Error.ErrorReporting()\r\n self.log_object = Error.LogReporting()\r\n\r\n # Initialize variables that will be set in other methods\r\n self.bad_assignees = None\r\n self.client_tickets = None\r\n self.project_id = None\r\n self.cursor = None\r\n self.conn = None\r\n\r\n # Log that ticketsync has been created\r\n issue_type = 'Incidents' if self.is_incident else 'Tasks'\r\n log_message = 'TicketSynchronizer was created for ' + self.project_code + ' ' + issue_type\r\n self.log_object.write_log(log_message)\r\n\r\n # Get DB access\r\n try:\r\n self.conn = JiraConnection.get_write_connection()\r\n self.cursor = self.conn.cursor()\r\n except pyodbc.DatabaseError:\r\n error_message = 'Error in database creation:\\n' + tr.format_exc()\r\n self.error_object.write_error(error_message)\r\n except Exception: # other error\r\n error_message = 'Error in TicketSync init:\\n' + tr.format_exc()\r\n self.error_object.write_error(error_message)\r\n\r\n def __enter__(self):\r\n \"\"\"\r\n Method for use in the with keyword.\r\n :return: self\r\n \"\"\"\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n \"\"\"\r\n Method executed when a with block is exited.\r\n :param exc_type:\r\n :param exc_val:\r\n :param exc_tb:\r\n :return: None\r\n \"\"\"\r\n self.close()\r\n\r\n def close(self):\r\n \"\"\"\r\n Close all opened resources.\r\n :return: None\r\n \"\"\"\r\n if self.cursor is not None:\r\n self.cursor.close()\r\n if self.conn is not None:\r\n self.conn.close()\r\n\r\n # TODO add in check for JIRA\r\n def __bool__(self):\r\n return self.cursor is None\r\n\r\n def obtain_data(self):\r\n \"\"\"\r\n This method will query the database to find the following values:\r\n self.bad_assignees := a set object containing a list of all assignees returned by assignee_check_sql\r\n self.project_id := the project's PKID\r\n self.client_tickets := an array of pyodbc.Row objects of client ticket information pulled from the DB\r\n \"\"\"\r\n c = self.cursor\r\n c.execute(_Sql.assignee_check, self.project_code)\r\n # create a set of all the external assignees that do not exist internally\r\n self.bad_assignees = set(\r\n [record.assignee for record in c.fetchall()]\r\n )\r\n\r\n # find the project ID used internally in the jira db\r\n result = c.execute(_Sql.project_pkid, self.project_code)\r\n self.project_id = result.fetchone()[0]\r\n\r\n # grab data inserted into the operational table\r\n c.execute(_Sql.get_client_tickets, self.project_code, str(self.is_incident))\r\n self.client_tickets = c.fetchall()\r\n\r\n def 
find_related_ticket(self, client_ticket):\r\n \"\"\"\r\n Identifies the related ticket in JIRA based on the customer id field.\r\n :constant _Sql.find_jira_ticket: the query used to identify the ticket.\r\n :constant _Sql.relate_client_ticket: the query used to update the related ticket in the client ticket table\r\n :param client_ticket: the pyodbc.Row object containing the client ticket\r\n :return linked_ticket: A pyodbc.Row object if found, None if unable to locate.\r\n \"\"\"\r\n c = self.cursor\r\n ext_id = str(client_ticket.number)\r\n c.execute(_Sql.find_jira_ticket, ext_id, self.project_id)\r\n\r\n all_matching_tickets = c.fetchall()\r\n if len(all_matching_tickets) > 0: # there was at least one match in JIRA\r\n earliest_record = all_matching_tickets[0]\r\n linked_ticket = earliest_record # currently linking based on the earliest ticket with that ID\r\n else: # no ticket found\r\n linked_ticket = None\r\n return linked_ticket\r\n\r\n def sync_tickets(self, client_ticket, linked_ticket):\r\n \"\"\"\r\n Synchronizes the client and linked tickets. Currently updating:\r\n - open date\r\n - close date\r\n :param client_ticket: The pyodbc.Row object containing the client ticket.\r\n :param linked_ticket: The pyodbc.Row object containing the JIRA ticket.\r\n :return: None\r\n \"\"\"\r\n int_num = linked_ticket.issuenum\r\n assert int_num is not None\r\n\r\n jira_identifier = self.project_code + '-' + str(int_num)\r\n ext_id = str(client_ticket.number)\r\n c = self.cursor\r\n try:\r\n c.execute(_Sql.relate_client_ticket, int_num, self.project_code, ext_id)\r\n except pyodbc.DatabaseError:\r\n error_message = tr.format_exc()\r\n self.error_object.write_error(error_message)\r\n\r\n # once tickets have been related, let's compare the dates to see if we need to update\r\n bool_open_match = linked_ticket.created == client_ticket.open_date\r\n bool_close_match = linked_ticket.resolutiondate == client_ticket.resolve_date\r\n\r\n try: # Attempt to update the dates through the database. 
Log this activity.\r\n if not bool_open_match:\r\n log_message = 'Updating Open_Date on ' + jira_identifier + \\\r\n ' to ' + str(client_ticket.open_date)\r\n self.log_object.write_log(log_message)\r\n\r\n c.execute(_Sql.opened_update, client_ticket.open_date, self.project_id, int_num)\r\n if not bool_close_match:\r\n log_message = 'Updating Resolve_Date on ' + jira_identifier + \\\r\n ' to ' + str(client_ticket.resolve_date)\r\n self.log_object.write_log(log_message)\r\n\r\n c.execute(_Sql.closed_update, client_ticket.resolve_date, self.project_id, int_num)\r\n\r\n except pyodbc.DatabaseError:\r\n log_message = 'Update failed on '\r\n error_message = tr.format_exc()\r\n self.error_object.write_error(error_message)\r\n\r\n def do(self):\r\n import Email as em\r\n import Daily_Export\r\n\r\n self.obtain_data()\r\n\r\n try:\r\n c = self.cursor\r\n assert c is not None\r\n assert self.conn is not None\r\n assert self.client_tickets is not None\r\n\r\n for client_ticket_row in self.client_tickets:\r\n linked_ticket = self.find_related_ticket(client_ticket_row)\r\n if linked_ticket is not None:\r\n try:\r\n self.sync_tickets(client_ticket_row, linked_ticket)\r\n except AssertionError:\r\n error_message = 'Programming Error in TicketSync.py: ' + tr.format_exc()\r\n self.error_object.write_error(error_message)\r\n\r\n if not self.debug:\r\n c.commit()\r\n\r\n daily_view = Daily_Export.DailyExport()\r\n daily_view.run(self.project_code)\r\n\r\n em.emailSyncCompletion(True, self.project_code, '', '')\r\n except AssertionError:\r\n error_message = 'TicketSync.py Error: ' + tr.format_exc()\r\n self.error_object.write_error(error_message)\r\n except Exception:\r\n error_message = tr.format_exc()\r\n error_message += \"\\n\" + \"-------------------------\"\r\n self.error_object.write_error(error_message)\r\n em.emailSyncCompletion(False, self.project_code, '', '')\r\n finally: # tidy up anything that is open\r\n self.close()\r\n\r\n\r\n# if run on its own, debug with Foo incidents\r\n\r\nif __name__ == \"__main__\":\r\n sync = TicketSynchronizer(is_incident=True, project_code='Foo')\r\n sync.debug = True\r\n sync.do()\r\n","sub_path":"ticket_synchronizer.py","file_name":"ticket_synchronizer.py","file_ext":"py","file_size_in_byte":12796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492817201","text":"import cv2 as cv\nimport numpy as np\n\ndef detect_hand(img_hand):\n '''\n Detect hand from image\n Parameters:\n -----------\n img_hand: input image\n \n Return:\n -------\n hand_mask: results with 255 for hand pixel and 0 for background \n '''\n hand_mask = np.zeros((img_hand.shape[0], img_hand.shape[1], 1), dtype = np.uint8)\n hand_mask=cv.cvtColor(img_hand,cv.COLOR_BGR2GRAY) # convert to grayscale\n hand_mask= cv.GaussianBlur(hand_mask,(5,5),0)\n ret,hand_mask= cv.threshold(hand_mask,700,255,cv.THRESH_BINARY_INV+cv.THRESH_OTSU)\n hand_mask[:int(0.25*hand_mask.shape[0])]=0\n hand_mask[:,int(0.7*hand_mask.shape[1]):]=0\n hand_mask[int(0.9*hand_mask.shape[0]):,int(0.6*hand_mask.shape[1]):]=0\n return hand_mask\n\nif __name__ == '__main__':\n video = cv.VideoCapture('hand.avi') \n \n if (video.isOpened()== False): \n print(\"Error opening video stream or file\")\n # Read until video is completed\n i = 0\n while(video.isOpened()):\n # Capture frame-by-frame\n ret, frame = video.read()\n if ret == True:\n # Display the resulting frame\n cv.imshow('Original Video',frame)\n # implement your detect_hand function\n hand_mask = detect_hand(frame)\n 
cv.imshow('Detection Results',hand_mask) \n cv.imwrite('hand_mask_' + str(i) + '.bmp',hand_mask)\n i += 1 \n # Press Q on keyboard to exit\n if cv.waitKey(25) & 0xFF == ord('q'):\n break\n else: \n break\n# When everything done, release the video capture object\n video.release()\n\n # Closes all the frames\n cv.destroyAllWindows()\n","sub_path":"HandDetect/HandDetect.py","file_name":"HandDetect.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"175770079","text":"'''\nqubit class\n\nUsing qubit class, the user can create qubit objects. When creating a qubit object, the amplitudes\nmust satisfy that |alpha|^2 + |beta|^2 = 1.\n\nInstances of qubit class have the following methods:\n\n- __init__() - initialize qubit\n- get_alpha() - getter of alpha\n- get_beta() - getter of beta\n- set_amplitudes() - setter of alpha, beta\n- show() - qubit representation\n- measure() - measure qubit\n- ket() - return the ket vector of qubit\n- bra() - return the bra vector of qubit\n\nThe random_qubit class is the same as qubit class the only difference that an instance of the\nclass is created with random amplitudes (alpha, beta). They share the same methods.\n'''\n\n# pylint: disable=E1101\n\nimport check_qubit\nimport numpy\nimport unicodedata\n\nclass Qubit(object):\n ''' qubit class '''\n\n @check_qubit.qubit_init_check\n def __init__(self, alpha, beta):\n ''' initialize qubit '''\n\n self.__alpha = alpha\n self.__beta = beta\n\n def get_alpha(self):\n ''' getter of alpha '''\n\n return self.__alpha\n\n def get_beta(self):\n ''' getter of beta '''\n\n return self.__beta\n\n @check_qubit.set_amplitudes_check\n def set_amplitudes(self, alpha, beta):\n ''' setter of alpha, beta '''\n\n self.__alpha = alpha\n self.__beta = beta\n\n def show(self):\n ''' qubit representation '''\n\n return '|' + unicodedata.lookup('GREEK CAPITAL LETTER PSI') + '> = ' +\\\n '({0:.4f}{1}{2:.4f}i)'.format(self.__alpha.real, '+-'[self.__alpha.imag < 0], \\\n abs(self.__alpha.imag)) + '|0> + ' + '({0:.4f}{1}{2:.4f}i)'.format(self.__beta.real, \\\n '+-'[self.__beta.imag < 0], abs(self.__beta.imag)) + '|1>'\n\n def measure(self):\n ''' measure the qubit '''\n\n result = numpy.random.choice([0, 1], p=[abs(self.__alpha) ** 2, abs(self.__beta) ** 2])\n if result == 0:\n self.__alpha = 1\n self.__beta = 0\n\n else:\n self.__alpha = 0\n self.__beta = 1\n \n return int(result)\n\n def ket(self):\n ''' return the ket vector of the qubit '''\n\n ket = numpy.array([self.__alpha, self.__beta])\n ket.shape = (2, 1)\n return ket\n\n def bra(self):\n ''' return the bra vector of the qubit '''\n\n bra = self.ket().transpose()\n return bra\n\nclass Random_Qubit(Qubit):\n ''' random qubit class '''\n\n def __init__(self):\n ''' initialize random qubit '''\n\n Qubit.__init__(self, 1, 0)\n\n alpha = numpy.random.uniform(0, 1)\n alpha1 = numpy.random.choice([-1, 1]) * numpy.sqrt(numpy.random.uniform(0, alpha))\n alpha2 = numpy.random.choice([-1, 1]) * numpy.sqrt(alpha - alpha1 ** 2)\n\n beta1 = numpy.random.choice([-1, 1]) * numpy.sqrt(numpy.random.uniform(0, 1 - alpha))\n beta2 = numpy.random.choice([-1, 1]) * numpy.sqrt(1 - alpha - beta1 ** 2)\n\n # super().set_amplitudes(complex(alpha1, alpha2), complex(beta1, beta2))\n super(Random_Qubit, self).set_amplitudes(complex(alpha1, alpha2), complex(beta1, 
beta2))\n","sub_path":"qvantum/qubit.py","file_name":"qubit.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488216246","text":"import os, sys\nimport time\nimport datetime\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"logviewer.settings\")\nimport django\n\ndjango.setup()\n\nfrom logviewer.settings import WATCH_PATH, XSL_WATCH_PATH\nfrom extensions.mongo_conf import db, XSL_ACCESS, XSL_API_ACCESS, XSL_EYAOS_ERR\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n\ndef save_data(log_data, collection):\n    \"\"\" Save log entries to MongoDB\"\"\"\n    for log in log_data[::-1]:\n        data = {\n            \"log\": log,\n            \"create_date\": datetime.datetime.now()\n        }\n        collection.insert(data)\n\n\ndef get_last_log(collection):\n    \"\"\" Get the most recent log entry\"\"\"\n\n    last_log = collection.find({}).sort(\"create_date\", -1).limit(1)\n    log = None\n    for i in list(last_log):\n        log = i['log']\n\n    return log\n\n\ndef deal_log_file(log_path):\n    \"\"\"Read the contents of a log file\"\"\"\n    log_name = log_path.split('/')[-1]\n\n    if log_name == 'access.log':\n        collection = XSL_ACCESS\n    elif log_name == 'api_access.log':\n        collection = XSL_API_ACCESS\n    elif log_name == 'eyaos-stderr.log':\n        collection = XSL_EYAOS_ERR\n\n    else:\n        collection = db.default\n\n    data_list = []\n    last_log = get_last_log(collection)\n    with open(log_path, 'r') as file:\n        lines = file.readlines()\n        for line in lines[::-1]:\n            sentence = line.split('\\n')[0]\n            if sentence == last_log:\n                break\n            data_list.append(sentence)\n            if last_log is None and len(data_list) == 100:\n                break\n    save_data(data_list, collection)\n\n\nclass FileEventHandler(FileSystemEventHandler):\n    \"\"\" Watch for file changes\"\"\"\n\n    def __init__(self):\n        FileSystemEventHandler.__init__(self)\n\n    def on_modified(self, event):\n        if not event.is_directory:\n            path = os.path.realpath(event.src_path)\n            deal_log_file(path)\n\n\nif __name__ == \"__main__\":\n    event_handler = FileEventHandler()\n    observer = Observer()\n    observer.schedule(event_handler, XSL_WATCH_PATH, recursive=True)\n    observer.start()\n    try:\n        while True:\n            time.sleep(1)\n    except KeyboardInterrupt:\n        observer.stop()\n    observer.join()\n","sub_path":"xsl_watcher.py","file_name":"xsl_watcher.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"72750014","text":"## module conjGrad\r\n''' x, numIter = conjGrad(Av,x,b,tol=1.0e-9)\r\n    Conjugate gradient method for solving [A]{x} = {b}.\r\n    The matrix [A] should be sparse. 
User must supply\r\n    the function Av(v) that returns the vector [A]{v}.\r\n''' \r\nfrom numarray import dot\r\nfrom math import sqrt\r\n\r\ndef conjGrad(Av,x,b,tol=1.0e-9):\r\n    n = len(b)\r\n    r = b - Av(x)\r\n    s = r.copy()\r\n    for i in range(n):\r\n        u = Av(s)\r\n        alpha = dot(s,r)/dot(s,u)\r\n        x = x + alpha*s\r\n        r = b - Av(x)\r\n        if(sqrt(dot(r,r))) < tol:\r\n            break\r\n        else:\r\n            beta = -dot(r,u)/dot(s,u)\r\n            s = r + beta*s\r\n    return x,i\r\n\r\n\r\n    \r\n","sub_path":"Numerical_Eng_Python/conjGrad.py","file_name":"conjGrad.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62269539","text":"\"\"\"\nMAIN PURPOSE: calculate density profiles and velocity dispersions for merger host galaxies\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nimport h5py\nimport tqdm\n\nfrom utils import SubProcess\n\nh = 0.704\n\n\ndef fit_func(x, a, b):\n    \"\"\"\n    Power law fit for the density profile\n    \"\"\"\n    return a*x**-b\n\n\nclass Density_Vel_Disp(SubProcess):\n    \"\"\"\n    Density_Vel_Disp calculates density profiles and velocity dispersions for the mergers. It gets density profiles for all particle types in remnant black hole host galaxies. Stellar velocity dispersions are calculated for all merger-related galaxies. If a fit does not converge, the merger is no longer considered part of our catalog.\n\n    Note: when downloading all the subhalos, some downloads may not have been perfect, meaning the files will not open. This should be a very small number. If this is the case, the code will error and stop running; you can see the last subhalo that the code tried to open. Use `download_single.py` for these files.\n\n    attributes:\n    :param dir_output - (str) - directory to work in\n\n    methods:\n    find_fit\n    gather_info_from_mergers\n    gather_info_from_subs_with_bhs\n    fit_main\n\n    \"\"\"\n\n    def __init__(self, core, **kwargs):\n        super().__init__(core)\n\n        fname_dens = core.fname_density_profiles()\n        fname_vdis = core.fname_vel_disp()\n        self.needed = self._check_needed(fname_dens, fname_vdis)\n\n        # exist_dens = os.path.exists(fname_dens)\n        # exist_vdis = os.path.exists(fname_vdis)\n        # if exist_dens and exist_vdis:\n        #     self.needed = False\n        #     print(\"Files already exist ('{}', '{}')\".format(fname_dens, fname_vdis))\n        # else:\n        #     self.needed = True\n        #     print(\"Missing files\")\n        #     print(\"\\t{}: {}\".format(fname_dens, exist_dens))\n        #     print(\"\\t{}: {}\".format(fname_vdis, exist_vdis))\n\n        self.fname_dens = fname_dens\n        self.fname_vdis = fname_vdis\n\n        return\n\n    def fit_main(self):\n        \"\"\"\n        Main function controlling fitting density profiles and getting velocity dispersions.\n        \"\"\"\n\n        # get information about subs downloaded\n        fname_needed = self.core.fname_snaps_and_subs()\n        subs_downloaded = np.genfromtxt(fname_needed, dtype=int)\n\n        # get other information needed\n        merger_time = self.gather_info_from_mergers()\n        subs_gc, snaps_gc, subhalo_cm_gc = self.gather_info_from_subs_with_bhs()\n\n        vel_disp_out = []\n        density_profile_out = []\n\n        # this figures out the remnant host halo. 
Order in ``snaps_and_subs_needed.txt`` is 3,2,1\n        uni_mergers, uni_index = np.unique(subs_downloaded[:, 0], return_index=True)\n\n        # bin selection arbitrary but did not affect results much\n        # bins = 100\n\n        # idea is to only get velocity dispersion of other subhalos if main halo can get the fit.\n        # if fit does not work, the merger is not considered\n        # tot_num = uni_index.size\n        for num, m_start_ind in enumerate(tqdm.tqdm(uni_index, desc='Subhalos')):\n            run_vel_disp = True\n\n            # if num < tot_num-10:\n            #     continue\n            #\n            # # try:\n            # #     break\n            # # except:\n            # #     print(\"break failed!\")\n            # #     continue\n\n            # this scrolls through the three subhalos per merger\n            for index in np.arange(m_start_ind, m_start_ind+3)[::-1]:\n\n                if run_vel_disp == False:\n                    continue\n\n                # extract galaxy info\n                row = subs_downloaded[index]\n                snap = row[2]\n                sub = row[3]\n\n                # merger index\n                m = row[0]\n\n                # final, prev_in, or prev_out\n                which = row[1]\n                # print(snap, sub, num)\n                if which == 3:\n                    # scale factor\n                    scale = merger_time[m]\n\n                    ind = np.where((snaps_gc == snap) & (subs_gc == sub))[0][0]\n\n                    # center of mass of galaxy\n                    sub_cm = subhalo_cm_gc[ind]\n\n                    fname_subh = self.core.fname_snap_sub_cutout(snap, sub)\n                    with h5py.File(fname_subh, 'r') as f:\n                        # for local testing\n                        # with h5py.File('cutout_%i_%i.hdf5'%(snap, sub), 'r') as f:\n\n                        # stars fits\n                        stars = f['PartType4']\n\n                        # if GFM_StellarFormationTime < 0.0, it is a wind particle, not a star.\n                        # keep = np.where(stars['GFM_StellarFormationTime'][:] >= 0.0)[0]\n                        keep = (stars['GFM_StellarFormationTime'][:] >= 0.0)\n\n                        # confirm >80 star particles (there should be from previous analysis)\n                        if np.count_nonzero(keep) < 80 and which == 3:\n                            run_vel_disp = False\n                            continue\n\n                        # If a fit cannot be done, then catch error and go to next merger\n                        try:\n                            var, run_vel_disp = self.find_fit(\n                                stars['Coordinates'][:][keep],\n                                stars['Masses'][:][keep]*1e10/h,\n                                sub_cm,\n                                scale\n                            )\n                            if run_vel_disp == False:\n                                continue\n\n                        except RuntimeError:\n                            run_vel_disp = False\n                            continue\n\n                        # store gamma value for stars\n                        star_norm, star_gamma = var\n\n                        # gas second #\n\n                        # halos missing gas are not considered part of the catalog.\n                        try:\n                            gas = f['PartType0']\n                        except KeyError:\n                            run_vel_disp = False\n                            continue\n\n                        # make sure 80 gas cells\n                        if len(gas['Coordinates'][:]) < 80:\n                            run_vel_disp = False\n                            continue\n\n                        try:\n                            var, run_vel_disp = self.find_fit(\n                                gas['Coordinates'][:], gas['Masses'][:]*1e10/h, sub_cm, scale)\n                            if run_vel_disp == False:\n                                continue\n                        except RuntimeError:\n                            run_vel_disp = False\n                            continue\n\n                        gas_norm, gas_gamma = var\n\n                        # dm third (PartType1 holds the dark matter particles)\n                        dm = f['PartType1']\n\n                        # make sure 300 dm particles\n                        if len(dm['Coordinates'][:]) < 300:\n                            run_vel_disp = False\n                            continue\n\n                        try:\n                            # dm particles all have the same mass\n                            var, run_vel_disp = self.find_fit(\n                                dm['Coordinates'][:],\n                                np.full((len(dm['Coordinates'][:]),), 6.3e6), sub_cm, scale)\n                            if run_vel_disp == False:\n                                continue\n\n                        except RuntimeError:\n                            run_vel_disp = False\n                            continue\n\n                        dm_norm, dm_gamma = var\n\n                        # add data to list if all try/excepts are successful\n                        density_profile_out.append(\n                            [m, which, snap, sub,\n                             star_norm, star_gamma,\n                             gas_norm, gas_gamma,\n                             dm_norm, dm_gamma]\n                        )\n\n                        # get velocity dispersion\n                        if run_vel_disp:\n                            vdisp = np.std(np.sqrt(np.sum(stars['Velocities'][:]**2, axis=1)))\n                            vel_disp_out.append([m, which, snap, sub, vdisp])\n\n                else:\n                    # get only velocity dispersion for constituent halos\n                    if run_vel_disp:\n                        fname_subh = self.core.fname_snap_sub_cutout(snap, sub)\n                        with 
h5py.File(fname_subh, 'r') as f:\n # for testing in local directories\n # with h5py.File('cutout_%i_%i.hdf5'%(snap, sub), 'r') as f:\n v2 = f['PartType4']['Velocities'][:] ** 2\n vdis = np.std(np.sqrt(np.sum(v2, axis=1)))\n vel_disp_out.append([m, which, snap, sub, vdis])\n\n # prep all the lists for read out and read out to files #\n\n density_profile_out = np.asarray(density_profile_out).T\n vel_disp_out = np.asarray(vel_disp_out).T\n\n density_profile_out = [density_profile_out[i] for i in range(len(density_profile_out))]\n vel_disp_out = [vel_disp_out[i] for i in range(len(vel_disp_out))]\n\n dtype = [\n ('merger', np.dtype(int)),\n ('which', np.dtype(int)),\n ('snap', np.dtype(int)),\n ('sub', np.dtype(int)),\n ('star_norm', np.dtype(float)),\n ('star_gamma', np.dtype(float)),\n ('gas_norm', np.dtype(float)),\n ('gas_gamma', np.dtype(float)),\n ('dm_norm', np.dtype(float)),\n ('dm_gamma', np.dtype(float))\n ]\n density_profile_out = np.core.records.fromarrays(density_profile_out, dtype=dtype)\n\n header = \\\n 'm\\twhich\\tsnap\\tsub\\tstar_norm\\tstar_gamma\\tgas_norm\\tgas_gamma\\tdm_norm\\tdm_gamma'\n fmt = '%i\\t%i\\t%i\\t%i\\t%.18e\\t%.18e\\t%.18e\\t%.18e\\t%.18e\\t%.18e'\n fname = self.fname_dens\n np.savetxt(fname, density_profile_out, fmt=fmt, header=header)\n print(\"Saved to '{}' size: {}\".format(fname, os.path.getsize(fname)))\n\n dtype = [\n ('merger', np.dtype(int)),\n ('which', np.dtype(int)),\n ('snap', np.dtype(int)),\n ('sub', np.dtype(int)),\n ('vel_disp', np.dtype(float))\n ]\n vel_disp_out = np.core.records.fromarrays(vel_disp_out, dtype=dtype)\n\n fmt = '%i\\t%i\\t%i\\t%i\\t%.18e'\n header = 'm\\twhich\\tsnap\\tsub\\tvel_disp'\n\n fname = self.fname_vdis\n np.savetxt(fname, vel_disp_out, fmt=fmt, header=header)\n print(\"Saved to '{}' size: {}\".format(fname, os.path.getsize(fname)))\n\n return\n\n def find_fit(self, coordinates, masses, sub_cm, scale):\n \"\"\"\n Function used to determine fit. 
Used for all particle types.\n \"\"\"\n\n # radius from CoM\n # comoving to physical -- kpc to pc\n radius = np.sqrt(np.sum((coordinates - sub_cm)**2, axis=1))*scale*1e3\n\n # put in structured array for sorting\n dtype = [('rad', np.dtype(float)), ('mass', np.dtype(float))]\n all_particles = np.core.records.fromarrays([radius, masses], dtype=dtype)\n\n all_particles = np.sort(all_particles, order=('rad',))\n\n extr = [all_particles['rad'][0]-1, all_particles['rad'][-1]+1]\n radial_bins_edges = np.logspace(*np.log10(extr), 100)\n\n # figure out which bin each particle is in\n bin_number_for_each_paricle = np.searchsorted(radial_bins_edges, all_particles['rad'])\n unique_bins, bin_count = np.unique(bin_number_for_each_paricle, return_counts=True)\n\n # make sure there are at least 8 bins with 4 or more particles in them.\n try:\n inds_bins = np.where(bin_count >= 4)[0][0:8]\n except IndexError:\n run_vel_disp = False\n return [], run_vel_disp\n\n unique_bins = unique_bins[inds_bins]\n\n # get necessary bin edges for volume only for eight bins tested\n radial_bins_inner_edges = radial_bins_edges[inds_bins]\n radial_bins_outer_edges = radial_bins_edges[inds_bins+1]\n\n # use bin centers for radial profile calculation\n radial_bin_centers = (radial_bins_outer_edges + radial_bins_inner_edges)/2.\n\n volume_bin = 4/3.*np.pi*(radial_bins_outer_edges**3 - radial_bins_inner_edges**3)\n # density_bin = np.zeros(8)\n\n # find bin densities\n density_bin = np.zeros(len(radial_bin_centers))\n for i, bin in enumerate(unique_bins):\n # which particles are in this bin\n # inds_bin = np.where(bin_number_for_each_paricle == bin)[0]\n inds_bin = (bin_number_for_each_paricle == bin)\n density_bin[i] = np.sum(all_particles['mass'][inds_bin])/volume_bin[i]\n\n # try fit\n var, cov = curve_fit(fit_func, radial_bin_centers[0:8], density_bin[0:8])\n return var, True\n\n def gather_info_from_mergers(self):\n \"\"\"\n Gets merger information.\n \"\"\"\n fname_mergers = self.core.fname_bhs_mergers()\n with h5py.File(fname_mergers, 'r') as mergers:\n # scale factor is needed to scale coordinates\n merger_time = mergers['time'][:]\n\n return merger_time\n\n def gather_info_from_subs_with_bhs(self):\n \"\"\"\n Gets information from galaxies with bhs.\n \"\"\"\n fname_bh_subs = self.core.fname_subs_with_bhs()\n with h5py.File(fname_bh_subs, 'r') as gc:\n subs_gc = gc['SubhaloID'][:]\n snaps_gc = gc['Snapshot'][:]\n subhalo_cm_gc = gc['SubhaloCM'][:]\n\n return subs_gc, snaps_gc, subhalo_cm_gc\n","sub_path":"extraction/utils/density_vel_disp.py","file_name":"density_vel_disp.py","file_ext":"py","file_size_in_byte":13788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"279867583","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_boston\n\n\n# 1. 
+{"seq_id":"279867583","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.datasets import load_boston\n\n\n# 1. Data\n\ndataset = load_boston()\n\nx = dataset.data\ny = dataset.target\n\nfrom sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state = 66 ) \n\n\nprint(x.shape)\nprint(y.shape)\n\n\n\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n\n\n# Data preprocessing\n\nx_train = x_train.reshape(x_train.shape[0], x_train.shape[1],1, 1)\nx_test = x_test.reshape(x_test.shape[0], x_test.shape[1],1, 1)\n# (x_test.reshape(x_test.shape[0], x_test.shape[1],x_test.shape[2], 1))\n\n\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout,LSTM\n\nmodel = Sequential()\nmodel.add(Conv2D(filters=64, kernel_size=(2), padding='same', input_shape=(13,1,1), activation='relu'))\nmodel.add(Conv2D(filters=52, kernel_size=(2), padding='same', input_shape=(13,1,1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=1))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(filters=32, kernel_size=(2), padding='same', input_shape=(13,1,1), activation='relu'))\nmodel.add(Conv2D(filters=18, kernel_size=(2), padding='same', input_shape=(13,1,1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=1))\nmodel.add(Dropout(0.2))\n\nmodel.add(Flatten())\nmodel.add(Dense(300, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(150, activation='relu'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(1))\n\nmodel.compile(loss = 'mse',optimizer = 'adam', metrics = ['mae'])\n\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nstop = EarlyStopping(monitor='loss', patience=16, mode='auto')\n\nmodel.fit(x_train, y_train, epochs=200, batch_size=5, validation_split=0.2, verbose=2, callbacks=[stop])\n
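# [editor's note, added] The reshapes above turn each 13-feature tabular row into a\n# 13x1 single-channel \"image\" so the data can flow through Conv2D layers: one sample\n# goes from shape (13,) to (13, 1, 1), and the 0.8 train split of the 506-row Boston\n# data from (404, 13) to (404, 13, 1, 1). With padding='same' the 2x2 kernels keep\n# the 13x1 spatial shape, and MaxPooling2D(pool_size=1) leaves it unchanged.\n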
\n\n#4. Evaluation, prediction\nloss, mae = model.evaluate(x_test, y_test, batch_size=5)\n\ny_pred = model.predict(x_test)\n\n\n\n# RMSE\nfrom sklearn.metrics import mean_squared_error\ndef RMSE(y_test,y_pred):\n return np.sqrt(mean_squared_error(y_test,y_pred))\nprint('RMSE: ',RMSE(y_test,y_pred))\n\n# R2\nfrom sklearn.metrics import r2_score\ndef R2(y_test,y_pred):\n return r2_score(y_test,y_pred)\nprint('R2: ', R2(y_test,y_pred))\n\n\n# CNN - boston\n# RMSE: 3.048824990483019\n# R2: 0.8874889863150697\n\n\n\n","sub_path":"keras/keras41_cnn1_boston.py","file_name":"keras41_cnn1_boston.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"337803432","text":"import datetime\nimport os\nimport time\nfrom collections import Counter, OrderedDict\nfrom typing import Any, Dict, Generator, List, Optional, Union\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport timm\nimport torch\nimport torch.nn as nn\nfrom sklearn import metrics\nfrom sklearn.model_selection import GroupKFold, KFold, StratifiedKFold, TimeSeriesSplit\nfrom torch import optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils import data\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms as T\nfrom tqdm import tqdm\n\nfrom src.exp.exp033.config import InputPath, ModelConfig, OutputPath\nfrom src.utils import AverageMeter, DefaultLogger, Jbl, Logger, fix_seed\n\nsns.set_style(\"whitegrid\")\n\n\ndef validate_config(model_config: ModelConfig) -> None:\n def _validate_run_name(model_config: ModelConfig) -> None:\n # If you want to remove models, run `rm output/model/*expXXX*` in the root dir.\n past_sessions = [\n x.split(\"_\")[0]\n for x in os.listdir(OutputPath.model)\n if x.endswith(\"_0.pth\")\n ]\n run_name = model_config.basic.run_name\n assert (\n run_name not in past_sessions\n ), f\"{run_name} already exists. 
Check and remove models if you want\"\n\n def _validate_device(model_config: ModelConfig) -> None:\n assert model_config.basic.device == \"cuda\"\n\n _validate_run_name(model_config)\n _validate_device(model_config)\n\n\ndef load_npz(path: str) -> np.array:\n x = np.load(path)[\"arr_0\"]\n return x\n\n\ndef generate_kf(cfg: ModelConfig) -> Generator:\n if cfg.kfold.method == \"kf\":\n kf = KFold(\n n_splits=cfg.kfold.number,\n shuffle=cfg.kfold.shuffle,\n random_state=cfg.basic.seed,\n )\n elif cfg.kfold.method == \"skf\":\n kf = StratifiedKFold(\n n_splits=cfg.kfold.number,\n shuffle=cfg.kfold.shuffle,\n random_state=cfg.basic.seed,\n )\n elif cfg.kfold.method == \"gkf\":\n kf = GroupKFold(n_splits=cfg.kfold.number)\n elif cfg.kfold.method == \"sgkf\":\n raise ValueError(\"kfold method sgkf is not implemented\")\n # kf = StratifiedGroupKFold(\n # n_splits=cfg.kfold.number, random_state=cfg.basic.seed\n # )\n elif cfg.kfold.method == \"tskf\":\n kf = TimeSeriesSplit(n_splits=cfg.kfold.number)\n else:\n raise ValueError(f\"{cfg.kfold.method} is not supported\")\n return kf\n\n\nclass ProbSpaceDataset(data.Dataset):\n IMG_MEAN = [0.485, 0.456, 0.406]\n IMG_STD = [0.229, 0.224, 0.225]\n\n def __init__(\n self, images: np.array, labels: Optional[np.array] = None, is_train: bool = True\n ) -> None:\n \"\"\"images.shape: (b, h, w, c), labels: (b,)\"\"\"\n assert (is_train and labels is not None) or (not is_train and labels is None)\n self.is_train = is_train\n self.images = images\n self.labels = labels\n\n size = (ModelConfig.params.image_size, ModelConfig.params.image_size)\n additional_items = (\n [\n T.ToPILImage(),\n T.Resize(size),\n ]\n if not is_train\n else [\n T.ToPILImage(),\n T.RandomGrayscale(p=0.2),\n T.RandomHorizontalFlip(),\n T.ColorJitter(\n brightness=0.3,\n contrast=0.5,\n saturation=[0.8, 1.3],\n hue=[-0.05, 0.05],\n ),\n T.RandomResizedCrop(size),\n ]\n )\n self.transformer = T.Compose(\n [\n *additional_items,\n T.ToTensor(),\n T.Normalize(mean=self.IMG_MEAN, std=self.IMG_STD),\n ]\n )\n\n def __getitem__(self, index) -> Dict[str, Any]:\n image = self.images[index]\n image = self.transformer(image)\n if self.is_train:\n label = self.labels[index]\n else:\n label = -1\n return {\"image\": image, \"label\": label}\n\n def __len__(self) -> int:\n return len(self.images)\n\n\nclass ProbSpaceModel(nn.Module):\n def __init__(self, model_config: ModelConfig):\n super().__init__()\n self.backbone = timm.create_model(\n model_config.params.model_name,\n pretrained=model_config.params.pretrained,\n num_classes=model_config.params.target_size,\n )\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n x = self.backbone(x)\n x = self.softmax(x)\n return x\n\n\ndef build_model(model_config: ModelConfig):\n model = ProbSpaceModel(model_config)\n model.to(model_config.basic.device)\n return model\n\n\nclass BaseRunner:\n def __init__(self, cfg: ModelConfig, logger: Optional[Logger] = None):\n self.cfg = cfg\n self.params = cfg.params\n if logger is not None:\n self.logger = logger\n else:\n logger = DefaultLogger()\n self.logger = logger\n self.logger.info(self.cfg)\n\n def _get_scheduler(\n self, optimizer: Union[optim.Adam]\n ) -> Union[lr_scheduler.ReduceLROnPlateau]:\n if self.params.scheduler.name == \"ReduceLROnPlateau\":\n scheduler = lr_scheduler.ReduceLROnPlateau(\n optimizer,\n mode=self.params.scheduler.mode,\n factor=self.params.scheduler.factor,\n patience=self.params.scheduler.patience,\n verbose=self.params.scheduler.verbose,\n eps=self.params.scheduler.eps,\n )\n 
else:\n raise ValueError(f\"{self.params.scheduler.name} is not supported\")\n return scheduler\n\n def _step_scheduler(\n self,\n scheduler: Union[\n lr_scheduler.ReduceLROnPlateau,\n ],\n avg_val_loss,\n ) -> Union[lr_scheduler.ReduceLROnPlateau]:\n if isinstance(scheduler, lr_scheduler.ReduceLROnPlateau):\n scheduler.step(avg_val_loss)\n else:\n raise ValueError(f\"{self.params.scheduler.name} is not supported\")\n return scheduler\n\n def _evaluate(\n self, y_true: np.array, y_pred: np.array, verbose: bool = False\n ) -> float:\n score = metrics.accuracy_score(y_true, y_pred)\n if verbose:\n self.logger.info(f\"Score: {score:<.5f}\")\n return score\n\n\nclass TrainRunner(BaseRunner):\n def _train_epoch(self, train_loader, model, criterion, optimizer):\n losses = AverageMeter()\n model.train()\n for _ in range(self.cfg.params.num_aug):\n for _, image_label_dict in enumerate(train_loader):\n images = image_label_dict.get(\"image\").to(self.cfg.basic.device)\n labels = image_label_dict.get(\"label\").to(self.cfg.basic.device)\n batch_size = labels.size(0)\n\n y_preds = model(images)\n loss = criterion(y_preds, labels)\n losses.update(loss.item(), batch_size)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return losses.avg\n\n def _valid_epoch(self, valid_loader, model, criterion):\n losses = AverageMeter()\n model.eval()\n preds = []\n for _, image_label_dict in enumerate(valid_loader):\n images = image_label_dict.get(\"image\").to(self.cfg.basic.device)\n labels = image_label_dict.get(\"label\").to(self.cfg.basic.device)\n batch_size = labels.size(0)\n\n with torch.no_grad():\n y_preds = model(images)\n loss = criterion(y_preds, labels)\n losses.update(loss.item(), batch_size)\n preds.append(y_preds.to(\"cpu\").numpy())\n predictions = np.concatenate(preds).reshape(-1, self.params.target_size)\n return losses.avg, predictions\n\n def _train(\n self,\n train: pd.DataFrame,\n train_images: np.array,\n train_labels: np.array,\n n_fold: int,\n ) -> pd.DataFrame:\n self.logger.info(f\"fold: {n_fold}\")\n\n is_tta_mode = self.params.num_tta > 0\n num_times_tta = 1 if not is_tta_mode else self.params.num_tta\n\n trn_idx = train[train[\"fold\"] != n_fold].index.tolist()\n val_idx = train[train[\"fold\"] == n_fold].index.tolist()\n train_images_folds = train_images[trn_idx]\n valid_images_folds = train_images[val_idx]\n train_labels_folds = train_labels[trn_idx]\n valid_labels_folds = train_labels[val_idx]\n # train_folds = train.loc[trn_idx].reset_index(drop=True)\n # valid_folds = train.loc[val_idx].reset_index(drop=True)\n valid_folds = train.loc[val_idx]\n train_dataset = ProbSpaceDataset(\n train_images_folds,\n train_labels_folds,\n is_train=True,\n # transform=get_transforms(self.params, data=\"train\"),\n )\n valid_dataset = ProbSpaceDataset(\n valid_images_folds,\n valid_labels_folds,\n is_train=is_tta_mode,\n # transform=get_transforms(self.params, data=\"valid\"),\n )\n train_loader = DataLoader(\n train_dataset,\n batch_size=self.params.batch_size,\n shuffle=True,\n num_workers=self.params.num_workers,\n pin_memory=True,\n drop_last=True,\n )\n valid_loader = DataLoader(\n valid_dataset,\n batch_size=self.params.batch_size,\n shuffle=False,\n num_workers=self.params.num_workers,\n pin_memory=True,\n drop_last=False,\n )\n\n # Weight rarer classes more heavily (inverse class frequency)\n weights = 1 / np.array(\n [Counter(train_labels_folds)[i] for i in range(self.params.target_size)]\n )\n weights = weights / np.sum(weights)\n assert np.all(weights != 0)\n
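# [editor's note, added] The block above builds inverse-frequency class weights so that\n # rarer classes contribute more to the loss. For example, with class counts [90, 9, 1]\n # the raw weights are [1/90, 1/9, 1/1], and after dividing by their sum they become\n # roughly [0.010, 0.099, 0.891]; they are passed to nn.CrossEntropyLoss(weight=...) below.\n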
weights = torch.tensor(weights).float().to(self.cfg.basic.device)\n\n model = build_model(model_config=self.cfg)\n model.to(self.cfg.basic.device)\n optimizer = optim.Adam(\n model.parameters(),\n lr=self.params.optimizer.lr,\n weight_decay=self.params.optimizer.weight_decay,\n amsgrad=self.params.optimizer.amsgrad,\n )\n scheduler = self._get_scheduler(optimizer)\n criterion = nn.CrossEntropyLoss(weight=weights)\n\n best_model = None\n best_preds = None\n best_score = 0\n scores: List[float] = []\n num_not_improved = 0\n for epoch in range(self.params.epochs):\n start_time = time.time()\n\n # _train_epoch takes (loader, model, criterion, optimizer); the scheduler is stepped per epoch below\n avg_loss = self._train_epoch(\n train_loader, model, criterion, optimizer\n )\n avg_val_loss_list: List[float] = []\n preds_array = np.zeros(\n (num_times_tta, len(val_idx), self.params.target_size)\n )\n for i in range(num_times_tta):\n avg_val_loss, preds = self._valid_epoch(valid_loader, model, criterion)\n avg_val_loss_list.append(avg_val_loss)\n preds_array[i] = preds\n avg_val_loss = np.mean(avg_val_loss_list)\n scheduler = self._step_scheduler(scheduler, avg_val_loss)\n\n preds = preds_array.mean(axis=0)\n preds_ = np.argmax(preds, axis=1)\n valid_labels_evaluate = valid_folds.loc[: self.original_train_length, :]\n valid_labels_evaluate = valid_labels_evaluate[\"target\"].values\n preds_evaluate = preds_[: len(valid_labels_evaluate)]\n score = self._evaluate(valid_labels_evaluate, preds_evaluate)\n scores.append(score)\n elapsed = time.time() - start_time\n self.logger.info(\n f\"Epoch {epoch+1} - avg_train_loss: {avg_loss:.4f} avg_val_loss: {avg_val_loss:.4f} time: {elapsed:.0f}s\"\n )\n self.logger.info(f\"Epoch {epoch+1} - Accuracy: {score}\")\n if score > best_score:\n best_model = model\n best_preds = preds\n best_score = score\n num_not_improved = 0\n else:\n num_not_improved += 1\n self.logger.info(f\"Epoch {epoch+1} - Best Score: {best_score:.4f}\")\n if (\n self.params.early_stopping_rounds > 0\n and self.params.early_stopping_rounds == num_not_improved\n ):\n self.logger.info(\n f\"Early stopping break: not improved {num_not_improved} times in a row\"\n )\n break\n\n torch.save(\n {\n \"model\": best_model.state_dict(),\n \"preds\": best_preds,\n \"best_score\": best_score,\n \"scores\": scores,\n \"config\": self.cfg,\n \"preds_evaluate_length\": len(preds_evaluate),\n },\n f\"{OutputPath.model}/{self.cfg.basic.run_name}_{n_fold}.pth\",\n )\n preds_check_point: Dict[str, Union[OrderedDict, torch.Tensor]] = torch.load(\n f\"{OutputPath.model}/{self.cfg.basic.run_name}_{n_fold}.pth\"\n )[\"preds\"]\n valid_folds[\"preds\"] = np.argmax(preds_check_point, axis=1)\n return valid_folds\n\n def run_cv(self, train: pd.DataFrame) -> None:\n self.logger.info(f\"Runner: {self.__class__.__name__}\")\n self.logger.info(f\"debug mode: {self.cfg.basic.is_debug}\")\n self.logger.info(f\"start time: {datetime.datetime.now()}\")\n train_images = load_npz(InputPath.train_images)\n train_labels = load_npz(InputPath.train_labels)\n self.original_train_length = len(train_images)\n if self.params.is_psuedo_labeling:\n pseudo_dict = self.psuedo_label(train)\n test_psuedo = pd.DataFrame({\"target\": pseudo_dict.values()})\n kf = generate_kf(self.cfg)\n kf_generator = kf.split(test_psuedo, test_psuedo[\"target\"])\n for fold_i, (_, val_idx) in enumerate(kf_generator):\n test_psuedo.loc[val_idx, \"fold\"] = fold_i\n test_psuedo = test_psuedo.assign(fold=test_psuedo[\"fold\"].astype(int))\n train = pd.concat((train, test_psuedo), axis=0, ignore_index=True)\n test_images = 
load_npz(InputPath.test_images)\n test_images_for_train = test_images[list(pseudo_dict.keys())]\n test_labels_for_train = np.array(list(pseudo_dict.values()))\n train_images = np.concatenate((train_images, test_images_for_train), axis=0)\n train_labels = np.concatenate((train_labels, test_labels_for_train), axis=0)\n assert train_images.shape[1:] == (224, 224, 3)\n assert train_labels.ndim == 1\n assert len(train_images) == len(train_labels)\n assert len(train_images) == len(train)\n oof_df = pd.DataFrame()\n for n_fold in range(self.cfg.kfold.number):\n start_time = time.time()\n _oof_df = self._train(train, train_images, train_labels, n_fold)\n elapsed = time.time() - start_time\n self.logger.info(f\"========== fold: {n_fold} result ==========\")\n self.logger.info(f\"fold{n_fold} time: {elapsed/60:.0f}min.\")\n _oof_df_evaluate = _oof_df.loc[: self.original_train_length, :]\n score = self._evaluate(\n _oof_df_evaluate[\"target\"], _oof_df_evaluate[\"preds\"], verbose=True\n )\n if hasattr(self.logger, \"result\"):\n self.logger.result(f\"Fold {n_fold} Score: {score:<.5f}\")\n oof_df = pd.concat([oof_df, _oof_df])\n self.logger.info(\"========== CV ==========\")\n oof_df_evaluate = oof_df.loc[: self.original_train_length, :]\n score = self._evaluate(\n oof_df_evaluate[\"target\"], oof_df_evaluate[\"preds\"], verbose=True\n )\n if hasattr(self.logger, \"result\"):\n self.logger.result(f\"CV Score: {score:<.5f}\")\n Jbl.save(oof_df, f\"{OutputPath.model}/oof_df_{self.cfg.basic.run_name}.jbl\")\n\n def psuedo_label(self, train: pd.DataFrame) -> Dict[int, int]:\n psuedo_base_run_name = self.params.psuedo_base_run_name\n self.logger.info(f\"Execute psuedo labeling with {psuedo_base_run_name}\")\n preds_train = np.zeros((len(train), self.params.target_size))\n for i in tqdm(range(self.cfg.kfold.number)):\n train_fold = train.loc[train[\"fold\"] == i, :]\n preds = torch.load(f\"{OutputPath.model}/{psuedo_base_run_name}_{i}.pth\")[\n \"preds\"\n ]\n preds_train[train_fold.index] = preds\n train_concat = pd.concat(\n (\n train[[\"target\"]],\n pd.DataFrame(np.max(preds_train, axis=1), columns=[\"pred_max\"]),\n pd.DataFrame(np.argmax(preds_train, axis=1), columns=[\"pred\"]),\n ),\n axis=1,\n )\n train_concat = train_concat.assign(\n is_correct=train_concat[\"target\"] == train_concat[\"pred\"]\n )\n threshold = train_concat.loc[\n train_concat[\"target\"] == train_concat[\"pred\"], :\n ].pred_max.mean()\n\n preds_test = Jbl.load(\n f\"{OutputPath.model}/preds_test_{psuedo_base_run_name}.jbl\"\n )\n preds_test = np.array(preds_test).mean(axis=0)\n test = pd.DataFrame(\n preds_test,\n columns=[f\"pred_{i}\" for i in range(self.params.target_size)],\n )\n test = test.assign(\n pred_max=np.max(preds_test, axis=1),\n pred=np.argmax(preds_test, axis=1),\n )\n test_pseudo = test[test[\"pred_max\"] >= threshold]\n pseudo_dict: Dict[int, int] = {}\n for idx, pred in zip(test_pseudo.index, test_pseudo[\"pred\"]):\n pseudo_dict[idx] = pred\n return pseudo_dict\n\n\nclass InferenceRunner(BaseRunner):\n def _test_epoch(self, test_loader, model):\n model.eval()\n preds = []\n for step, image_label_dict in enumerate(test_loader):\n images = image_label_dict.get(\"image\").to(self.cfg.basic.device)\n with torch.no_grad():\n y_preds = model(images)\n preds.append(y_preds.to(\"cpu\").numpy())\n predictions = np.concatenate(preds).reshape(-1, self.params.target_size)\n return predictions\n\n def _test(self, test: pd.DataFrame, test_images: np.array, n_fold: int):\n self.logger.info(f\"fold: {n_fold}\")\n\n 
is_tta_mode = self.params.num_tta > 0\n num_times_tta = 1 if not is_tta_mode else self.params.num_tta\n\n test_dataset = ProbSpaceDataset(\n test_images,\n is_train=False,\n )\n test_loader = DataLoader(\n test_dataset,\n batch_size=self.params.test_batch_size,\n shuffle=False,\n num_workers=self.params.num_workers,\n pin_memory=True,\n drop_last=False,\n )\n\n model = build_model(model_config=self.cfg)\n model_state = torch.load(\n f\"{OutputPath.model}/{self.cfg.basic.run_name}_{n_fold}.pth\"\n )[\"model\"]\n model.load_state_dict(model_state)\n model.to(self.cfg.basic.device)\n # preds = self._test_epoch(test_loader, model)\n preds_array = np.zeros(\n (num_times_tta, len(test_images), self.params.target_size)\n )\n for i in range(num_times_tta):\n _preds = self._test_epoch(test_loader, model)\n preds_array[i] = _preds\n preds = preds_array.mean(axis=0)\n return preds\n\n def _submit(self, preds: np.array) -> None:\n test_images = load_npz(InputPath.test_images)\n df_sub = pd.DataFrame({\"id\": list(range(len(test_images)))})\n df_sub = df_sub.assign(y=preds)\n self.logger.info(df_sub.head())\n df_sub = df_sub.astype(int)\n path = f\"{OutputPath.submission}/submission_{self.cfg.basic.run_name}.csv\"\n df_sub.to_csv(path, index=False)\n self.logger.info(\"submission.csv created\")\n\n def run_cv(self, test: pd.DataFrame = None) -> None:\n self.logger.info(f\"Runner: {self.__class__.__name__}\")\n test_images = load_npz(InputPath.test_images)\n preds: List[np.array] = []\n for n_fold in range(self.cfg.kfold.number):\n preds_fold = self._test(test, test_images, n_fold)\n preds.append(preds_fold)\n Jbl.save(preds, f\"{OutputPath.model}/preds_test_{self.cfg.basic.run_name}.jbl\")\n\n preds_mean = np.mean(preds, axis=0)\n assert preds_mean.shape == (497, 13)\n preds_mean = preds_mean.argmax(axis=1)\n assert preds_mean.shape == (497,)\n self._submit(preds_mean)\n\n\ndef visualize_prediction(model_config: ModelConfig, logger: Logger) -> None:\n logger.info(\n Jbl.load(f\"{OutputPath.model}/oof_df_{model_config.basic.run_name}.jbl\").head()\n )\n sns.countplot(\n x=Jbl.load(f\"{OutputPath.model}/oof_df_{model_config.basic.run_name}.jbl\")[\n \"preds\"\n ]\n )\n plt.savefig(f\"{OutputPath.model}/pred_countplot_{model_config.basic.run_name}.png\")\n\n\ndef main():\n fix_seed()\n model_config = ModelConfig()\n validate_config(model_config)\n\n train_labels = load_npz(InputPath.train_labels)\n train = pd.DataFrame({\"target\": train_labels})\n\n kf = generate_kf(model_config)\n kf_generator = kf.split(train, train[\"target\"])\n for fold_i, (_, val_idx) in enumerate(kf_generator):\n train.loc[val_idx, \"fold\"] = fold_i\n train = train.assign(fold=train[\"fold\"].astype(int))\n\n run_name = model_config.basic.run_name\n logger = Logger(\n f\"{OutputPath.logs}/{run_name}/general.log\",\n f\"{OutputPath.logs}/{run_name}/result.log\",\n run_name,\n )\n TrainRunner(model_config, logger).run_cv(train)\n InferenceRunner(model_config, logger).run_cv()\n\n visualize_prediction(model_config, logger)\n","sub_path":"src/exp/exp033/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":21942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"127508377","text":"'''\nGiven a non-negative integer x, compute and return the square root of x.\n\nSince the return type is an integer, the decimal digits are truncated, and only the integer part of the result is returned.\n\nExample:\nInput: x = 8\nOutput: 2\nExplanation: The square root of 8 is 2.82842..., 
and since the decimal part is truncated, 2 is returned.\n'''\n\nclass Solution:\n def mySqrt(self, x: int) -> int:\n if x==0:\n return 0\n \n start=1\n end=x\n ans = 0\n\n while(start<=end):\n mid = start + (end-start)//2\n if mid<=x/mid:\n ans = mid\n start = mid+1\n else:\n end = mid-1\n \n return int(ans)\n","sub_path":"sqrt.py","file_name":"sqrt.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2060601","text":"from .resourceHandler import ResourceHandler\r\n\r\n\r\nclass APIDevice(ResourceHandler):\r\n component = 'device'\r\n default_sort = 'origin'\r\n description = 'Add/Update/Delete device (Origin) settings'\r\n has_rpc_calls = True\r\n\r\n def post(self, identifier, data, resource_def, resource_info, *args, **kwargs):\r\n resource = resource_def(self._data_manager, identifier=identifier)\r\n if self.api_req.content_type == 'application/json-rpc':\r\n try:\r\n call = self.api_req.data['call']\r\n args = self.api_req.data.get('args', {})\r\n if call == 'device_state':\r\n active = args.get('active', 1)\r\n origin = resource['origin']\r\n self._data_manager.set_device_state(origin, active)\r\n if active == 0:\r\n self._mapping_manager.device_set_disabled(origin)\r\n self._ws_server.force_disconnect(origin)\r\n return (None, 200)\r\n elif call == 'flush_level':\r\n resource.flush_level()\r\n return (None, 204)\r\n else:\r\n return (call, 501)\r\n except KeyError:\r\n return (call, 501)\r\n else:\r\n return super().post(identifier, data, resource_def, resource_info, *args, **kwargs)\r\n","sub_path":"mapadroid/madmin/api/resources/ftr_device.py","file_name":"ftr_device.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"341256029","text":"from base_log_parser import BaseSessionLogParser, UnparsableLineException\nfrom system2_log_parser import System2LogParser\nimport numpy as np\nimport re\nimport json\n\n\ndef PSLogParser(protocol, subject, montage, experiment, files):\n \"\"\"\n Decides which of the PS parsers to use\n :param protocol:\n :param subject:\n :param montage:\n :param experiment:\n :param files:\n :return:\n \"\"\"\n if 'session_log' in files:\n return PSSessionLogParser(protocol, subject, montage, experiment, files)\n else:\n return PSHostLogParser(protocol, subject, montage, experiment, files)\n\n\nclass PSSessionLogParser(BaseSessionLogParser):\n\n _STIM_PARAM_FIELDS = System2LogParser.sys2_fields()\n\n PULSE_WIDTH = 300\n\n @classmethod\n def empty_stim_params(cls):\n \"\"\"\n Makes a recarray for empty stim params (no stimulation)\n :return:\n \"\"\"\n return cls.event_from_template(cls._STIM_PARAM_FIELDS)\n\n @classmethod\n def _ps_fields(cls):\n \"\"\"\n Returns the template for a new FR field\n Has to be a method because of call to empty_stim_params, unfortunately\n :return:\n \"\"\"\n return (\n ('exp_version', '', 'S16'),\n ('ad_observed', 0, 'b1'),\n ('is_stim', 0, 'b1')\n )\n\n\n def __init__(self, protocol, subject, montage, experiment, files):\n super(PSSessionLogParser, self).__init__(protocol, subject, montage, experiment, files,\n include_stim_params=True)\n self._exp_version = '1.0'\n self._session = -999\n self._stim_anode = None\n self._stim_cathode = None\n self._stim_anode_label = None\n self._stim_cathode_label = None\n self._stim_amplitude = None\n self._previous_stim_duration = None\n self._saw_ad = False\n self._add_fields(*self._ps_fields())\n 
self._add_type_to_new_event(\n STIM_LOC=self.stim_loc,\n AMPLITUDE_CONFIRMED=self.amplitude_confirmed,\n STIMULATING=self.event_stimulating,\n BEGIN_BURST=self.begin_burst,\n AFTER_DISCHARGE=self.event_ad_check,\n ADs_CHECKED=self.event_ads_checked,\n BEGIN_PS1=self.event_default,\n BEGIN_PS2=self.event_default,\n BEGIN_PS3=self.event_default,\n BURST=self._event_skip,\n END_EXP=self.event_default,\n RAM_PS=self.ram_ps,\n STIM_SINGLE_PULSE=self.event_stim_single_pulse,\n PAUSED=self.event_default,\n UNPAUSED=self.event_default\n )\n self._add_type_to_modify_events(\n BEGIN_PS1=self.begin_ps1,\n BEGIN_PS2=self.begin_ps2,\n BEGIN_PS3=self.begin_ps3,\n AFTER_DISCHARGE=self.modify_ad_check,\n STIMULATING=self.make_stim_off,\n BEGIN_BURST=self.make_stim_off,\n )\n\n def event_default(self, split_line):\n event = super(PSSessionLogParser, self).event_default(split_line)\n event.exp_version = self._exp_version\n return event\n\n def stim_loc(self, split_line):\n self._stim_anode_label = split_line[4]\n self._stim_cathode_label = split_line[6]\n #reverse_jacksheet = {v: k for k, v in self._jacksheet_dict.items()}\n #self._stim_anode = reverse_jacksheet[self._stim_anode_label]\n #self._stim_cathode = reverse_jacksheet[self._stim_cathode_label]\n return False\n\n def ram_ps(self, split_line):\n self._exp_version = re.sub(r'[^\\d.]', '', split_line[3])\n return False\n\n def amplitude_confirmed(self, split_line):\n self._stim_amplitude = float(split_line[3])\n return False\n\n def begin_ps1(self, events):\n events.experiment = 'PS1'\n self._experiment = 'PS1'\n return events\n\n def begin_ps2(self, events):\n events.experiment = 'PS2'\n self._experiment = 'PS2'\n return events\n\n def begin_ps3(self, events):\n events.experiment = 'PS3'\n self._experiment = 'PS3'\n return events\n\n def begin_burst(self, split_line):\n event = self.event_default(split_line)\n event.type = 'STIM'\n params = {}\n params['anode_label'] = self._stim_anode_label\n params['cathode_label'] = self._stim_cathode_label\n params['amplitude'] = float(split_line[7])\n params['pulse_freq'] = int(split_line[3])\n params['burst_freq'] = int(split_line[4])\n params['n_pulses'] = int(split_line[5])\n params['n_bursts'] = int(split_line[6])\n params['pulse_width'] = self.PULSE_WIDTH\n params['stim_duration'] = 1000. 
* params['n_bursts'] / params['burst_freq']\n params['stim_on'] = True\n self._previous_stim_duration = params['stim_duration'] + params['n_pulses'] * params['pulse_freq']\n self.set_event_stim_params(event, self._jacksheet, **params)\n return event\n\n def event_stimulating(self, split_line):\n event = self.event_default(split_line)\n event.type = 'STIM'\n event.is_stim = True\n params = {}\n params['stim_duration'] = int(split_line[5])\n\n if not self._stim_anode_label or not self._stim_cathode_label:\n raise UnparsableLineException('Stim occurred prior to defining stim pairs!')\n\n #params['anode_number'] = self._stim_anode\n #params['cathode_number'] = self._stim_cathode\n params['anode_label'] = self._stim_anode_label\n params['cathode_label'] = self._stim_cathode_label\n params['amplitude'] = float(split_line[4])\n params['pulse_freq'] = int(split_line[3])\n params['n_pulses'] = params['pulse_freq'] / params['stim_duration']\n params['burst_freq'] = 1\n params['n_bursts'] = 1\n params['pulse_width'] = self.PULSE_WIDTH\n params['stim_on'] = True\n self._previous_stim_duration = params['stim_duration']\n self.set_event_stim_params(event, self._jacksheet, **params)\n return event\n\n def event_stim_single_pulse(self, split_line):\n event = self.event_default(split_line)\n event.is_stim = True\n if not self._stim_anode_label or not self._stim_cathode_label:\n raise UnparsableLineException('Stim occurred prior to defining stim pairs!')\n\n self.set_event_stim_params(event, self._jacksheet,\n anode_label=self._stim_anode_label,\n cathode_label=self._stim_cathode_label,\n amplitude=float(split_line[3]),\n pulse_freq=-1,\n n_pulses=1,\n burst_freq=1,\n n_bursts=1,\n pulse_width=self.PULSE_WIDTH,\n stim_duration=1,\n stim_on=True)\n return event\n\n def make_stim_off(self, events):\n off_event = events[-1].copy()\n off_event.type = 'STIM_OFF'\n off_event.is_stim = False\n off_event.mstime += self._previous_stim_duration\n self.set_event_stim_params(off_event, self._jacksheet, stim_on=False)\n\n return np.append(events, off_event).view(np.recarray)\n\n def event_ads_checked(self, split_line):\n event = self.event_default(split_line)\n event.type = 'AD_CHECK'\n return event\n\n def event_ad_check(self, split_line):\n event = self.event_default(split_line)\n event.type = 'AD_CHECK'\n self._saw_ad = split_line[3] == 'YES'\n event.ad_observed = self._saw_ad\n return event\n\n def modify_ad_check(self, events):\n if not self._saw_ad:\n return events\n last_ad_check = np.where(events.type == 'AD_CHECK')[0]\n if len(last_ad_check) > 1:\n start_index = last_ad_check[-2]\n else:\n start_index = 0\n events[start_index:].ad_observed = True\n return events\n\n\nclass PSHostLogParser(BaseSessionLogParser):\n\n _TYPE_INDEX = 1\n _SPLIT_DELIMITER = '~'\n\n NP_TIC_RATE = 1000\n\n _PS_FIELDS = (\n ('exp_version', '', 'S16'),\n ('ad_observed', 0, 'b1'),\n ('is_stim', 0, 'b1')\n )\n\n def __init__(self, protocol, subject, montage, experiment, files,\n primary_log='host_logs', allow_unparsed_events=True, include_stim_params=True):\n super(PSHostLogParser, self).__init__(protocol, subject, montage, experiment, files,\n primary_log, allow_unparsed_events, include_stim_params)\n self._exp_version = '2.0'\n self._session = -999\n self._saw_ad = False\n self.beginning_marked = False\n\n eeg_sources = json.load(open(files['eeg_sources']))\n first_eeg_source = sorted(eeg_sources.values(), key=lambda source: source['start_time_ms'])[0]\n np_earliest_start = first_eeg_source['start_time_ms']\n self.host_offset = None\n\n 
for line in self._contents:\n if line[1] == 'NEUROPORT-TIME':\n # np_start_host = host_time - (np_time / 30)\n # np_start_host + host_offset = np_start_ms\n # host_time + host_offset = ms_time\n np_start_host = int(line[0]) - int(line[2])/30\n self.host_offset = np_start_host - np_earliest_start\n break\n if not self.host_offset:\n raise UnparsableLineException(\"Cannot determine host offset\")\n\n self._add_fields(*self._PS_FIELDS)\n\n self._add_type_to_new_event(\n SHAM=self.event_default,\n AD_CHECK=self.event_ad_check,\n PS2=self.event_version\n\n )\n self._add_type_to_new_event(**{\n 'NEUROPORT-TIME': self.mark_beginning\n })\n\n self._add_type_to_modify_events(\n PS2=self.remove_previous_starts,\n AD_CHECK=self.modify_ad_check,\n )\n\n def clean_events(self, events):\n stim_events = np.logical_or(events['type'] == 'STIM', events['type'] == 'STIM_OFF')\n stim_events = np.logical_or(stim_events, events['type'] == 'SHAM')\n stim_event_indices = np.where(stim_events)[0]\n\n poll_events = np.where(events['type'] == 'NP_POLL')[0]\n first_poll_event = poll_events[0]\n last_poll_event = poll_events[-1]\n\n # Need the last two events (on/off) before the first np poll and the two events after the last np poll\n stim_before = np.array([index for index in stim_event_indices if index < first_poll_event - 2])\n stim_after = np.array([index for index in stim_event_indices if index > last_poll_event + 2])\n\n good_range = np.array([index for index in range(len(events)) \\\n if index not in stim_before and index not in stim_after])\n\n cleaned_events = events[good_range]\n # Remove NP_POLL\n cleaned_events = cleaned_events[cleaned_events['type'] != 'NP_POLL']\n\n return cleaned_events\n\n def mark_beginning(self, split_line):\n event = self.event_default(split_line)\n event['type'] = 'NP_POLL'\n return event\n\n def read_primary_log(self):\n if isinstance(self._primary_log, list):\n self.host_log_files = sorted(self._primary_log)\n else:\n self.host_log_files = [self._primary_log]\n contents = []\n for host_log_file in self.host_log_files:\n contents += [line.strip().split(self._SPLIT_TOKEN)\n for line in open(host_log_file).readlines()]\n return contents\n\n @staticmethod\n def persist_fields_during_stim(event):\n return ('protocol', 'subject', 'montage', 'experiment', 'session', 'eegfile', 'exp_version')\n\n def event_default(self, split_line):\n event = self._empty_event\n event.mstime = int(split_line[0]) - self.host_offset\n event.type = split_line[1]\n event.exp_version = self._exp_version\n return event.view(np.recarray)\n\n def event_version(self, split_line):\n self._exp_version = re.sub(r'[^\\d.]', '', split_line[2])\n event = self.event_default(split_line)\n event.type = 'SESS_START'\n return event\n\n def remove_previous_starts(self, events):\n starts = events.type == 'SESS_START'\n if np.count_nonzero(starts) > 1:\n starts[-1] = False\n events = events[np.logical_not(starts)]\n return events\n\n\n def event_ad_check(self, split_line):\n event = self.event_default(split_line)\n self._saw_ad = not split_line[2] == 'NOT_OBSERVED'\n event.ad_observed = self._saw_ad\n return event\n\n def modify_ad_check(self, events):\n if not self._saw_ad:\n return events\n last_ad_check = np.where(events.type == 'AD_CHECK')[0]\n if len(last_ad_check) > 1:\n start_index = last_ad_check[-2]\n else:\n start_index = 0\n events[start_index:].ad_observed = True\n return 
events","sub_path":"parsers/ps_log_parser.py","file_name":"ps_log_parser.py","file_ext":"py","file_size_in_byte":13070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"76313404","text":"\"\"\"\n=============\nNo Histograms\n=============\n\nSometimes marginalised histograms are not needed.\n\n\"\"\"\n\n\nfrom numpy.random import multivariate_normal, normal, seed\nfrom chainconsumer import ChainConsumer\n\nif __name__ == \"__main__\":\n seed(0)\n cov = normal(size=(3, 3))\n data = multivariate_normal(normal(size=3), 0.5 * (cov + cov.T), size=100000)\n\n c = ChainConsumer().add_chain(data)\n c.configure_general(plot_hists=False)\n fig = c.plot()\n\n fig.set_size_inches(2.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this.\n\n","sub_path":"examples/customisations/plot_no_histograms.py","file_name":"plot_no_histograms.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491066271","text":"\"\"\"\nWSGI config for ReMo project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nimport site\n\nfrom django.core.wsgi import get_wsgi_application\ntry:\n import newrelic.agent\nexcept ImportError:\n newrelic = False\n\nif newrelic:\n newrelic_ini = os.getenv('NEWRELIC_PYTHON_INI_FILE', False)\n if newrelic_ini:\n newrelic.agent.initialize(newrelic_ini)\n else:\n newrelic = False\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'remo.settings')\n\n# Add `remo` to the python path\nwsgidir = os.path.dirname(__file__)\nsite.addsitedir(os.path.abspath(os.path.join(wsgidir, '../')))\n\napplication = get_wsgi_application()\n\nif newrelic:\n application = newrelic.agent.wsgi_application()(application)\n","sub_path":"wsgi/playdoh.wsgi","file_name":"playdoh.wsgi","file_ext":"wsgi","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"508886225","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: mlcc\n@file: multiprocess.py\n@time: 19-4-8 下午10:26 \nDescription: 用队列解决相关的问题\n\"\"\"\n\nimport multiprocessing as mp\n\n# 重构洗盘子\nimport threading, queue\nimport time\n\n\n# 模拟一个洗盘子的人和多个烘干的进程,使用一个中间队列dish_queue\ndef washer(dishes,output):\n for dish in dishes:\n print('Washing',dish,'dish')\n output.put(dish)\n\ndef dryer(input):\n while True:\n dish = input.get()\n print('Drying',dish,'dish')\n input.task_done()\n\ndish_queue = mp.JoinableQueue()\ndryer_proc = mp.Process(target=dryer,args=(dish_queue,))\ndryer_proc.daemon = True\ndryer_proc.start()\n\ndishes = ['salad','bread','entree','dessert']\nwasher(dishes,dish_queue)\ndish_queue.join()\n\nprint('----------------使用线程来重构这个洗盘子的例子--------------------')\n\ndef new_washer(dishes,dish_queue):\n for dish in dishes:\n print('Washing ',dish)\n time.sleep(5)\n dish_queue.put(dish)\n\ndef new_dryer(dish_queue):\n while True:\n dish = dish_queue.get()\n print('drying',dish)\n time.sleep(10)\n dish_queue.task_done\n\nnew_dish_queue = queue.Queue\nfor n in range(2):\n new_dryer_thread = threading.Thread(target=dryer,args=(dish_queue,))\n new_dryer_thread.start()\n\ndishes = ['salad','bread','entree','dessert']\nwasher(dishes,dish_queue)\ndish_queue.join()\n\nif __name__ == '__main__':\n\n\n\n\n 
pass","sub_path":"src/reading/introducing/charpt11/multiprocess.py","file_name":"multiprocess.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"50595730","text":"import time,sys,os,re\nfrom thread2 import Thread\nfrom threading import Lock\nfrom babel_global import *\n\nclass DeadThreadError(Exception): pass\n\nclass babel_core:\n def __init__(self, globals):\n self.globals = globals\n \n self.modules_loaded = False\n \n self.module_name = \"Core\"\n self.module_status = \"Running\"\n debug_log(0,self.module_name, \"Babel version %d.%d.%d launched.\" % \n (self.globals.version_major, self.globals.version_major, self.globals.version_revision))\n debug_log(0,self.module_name,\"Core Initialized. Loading Modules\")\n if len(self.globals.babel_threads) != 0:\n debug_log(0,self.module_name, \"Threads found in thread array. Attempting to kill all threads.\")\n kill_all_threads(self.globals)\n time.sleep(5)\n \n self.load_modules()\n self.modules_loaded = True\n\n def load_modules(self):\n \n self.globals.babel_modules = [self]\n self.globals.babel_module_imports = []\n self.globals.babel_module_classes = [self]\n modules_path = self.globals.BABEL_WORKING_DIRECTORY + '/modules'\n module_dir_list = os.listdir(modules_path)\n for module_dir_item in module_dir_list:\n if os.path.isfile(modules_path + '/' + module_dir_item) and \\\n module_dir_item != self.globals.BABEL_CORE_FILENAME and \\\n module_dir_item[-3:] == '.py':\n debug_log(0,self.module_name,\"Found module: %s\" % module_dir_item[:-3])\n disable_module_load = False\n module_config_file = modules_path + '/' + module_dir_item[:-3] + '.config'\n if os.path.isfile(module_config_file):\n debug_log(0,self.module_name,\"Found config file for this module. 
Parsing\")\n fh = open(module_config_file)\n module_config = fh.read()\n fh.close()\n module_config = module_config.replace('\\r\\n','\\n').replace('\\r','\\n')\n module_config_lines = module_config.split('\\n')\n for config_line in module_config_lines:\n config_match = re.findall(self.globals.config_pattern,config_line.lower())\n if len(config_match) > 0:\n config_param = config_match[0][0]\n config_val = config_match[0][1]\n if (config_param == 'disable' or config_param == 'disable_module'):\n if config_val == '1' or config_val == 'yes' or config_val == 'true':\n disable_module_load = True\n\n if disable_module_load:\n debug_log(0,self.module_name,\"This module will not be loaded due to config settings.\")\n continue\n debug_log(0,self.module_name,\"Loading module.\")\n module_object = __import__(module_dir_item[:-3])\n debug_log(0,self.module_name,\"Module imported.\")\n self.globals.babel_module_imports.append(module_object)\n exec(\"module_class = module_object.%s(self.globals)\" % module_dir_item[:-3])\n self.globals.babel_module_classes.append(module_class)\n module_class.start()\n debug_log(0,self.module_name,\"Modules Loaded.\")\n \n \n def run(self):\n self.module_status = \"Monitoring modules\"\n while not self.globals.do_exit:\n module_str = \"\"\n for module in self.globals.babel_modules:\n module_str += \"%s: %s, \" % (module.module_name, module.module_status)\n if len(module_str) >= 1:\n module_str = module_str[:-2]\n else:\n module_str = \"(None)\"\n \n debug_log(11,self.module_name,\"Active Modules: %s\" % module_str)\n \n for t in self.globals.babel_module_classes:\n if t == self:\n continue\n if not t.isAlive():\n raise DeadThreadError()\n \n \n time.sleep(1)\n \n debug_log(1,self.module_name,\"Exit requested. Terminating threads.\")\n raise DeadThreadError()","sub_path":"modules/babel_core.py","file_name":"babel_core.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"501689684","text":"# Compress and decompress file using Huffamn\r\n# Author: Xingwang Cheng cheng830@umn.edu\r\n#2019-5-24\r\n\r\n# import module\r\n# os: using path.splitext to splite the filename and the filename extension\r\n# six: usingn int2byte to convert the file to bit\r\n# tkinter: using filedialog.askopenfilenames to open file(Not necessary)\r\nimport os, six, tkinter\r\n\r\n\"\"\"\r\ncompress main function\r\n\r\n\"\"\"\r\ndef compress(fileName):\r\n \r\n # 1. open file\r\n f = open(fileName, 'r') \r\n\r\n # 2. #input file as bit\r\n fileData = checkBinary(f.read()) \r\n f.close() #close file\r\n\r\n # 3. statics the frequence of each character in the file\r\n charFreq = {}\r\n for word in fileData:\r\n charFreq.setdefault(word, 0) #WAHT IT MEANS??\r\n charFreq[word] += 1\r\n\r\n # 4. Build the Huffman Tree\r\n\r\n # 4.1 initial the list of character and character-frequency\r\n sortList = sortTuple(charFreq)\r\n\r\n # 4.2 build the Huffman code dictionary\r\n codeDict = {}\r\n\r\n # 4.3 sort the tree\r\n for i in range(len(sortList) - 1):\r\n sortList = sortTuple( dict(sortList))\r\n codeDict = getCodingSchedule(sortList.pop(), sortList.pop(), sortList, codeDict)\r\n \r\n # 5. file data convert to Huffman code\r\n\r\n # 5.1 0-1 Huffman code\r\n code = ' '.join(list(codeDict.values() ))# WHAT\r\n for word in fileData:\r\n code += codeDict[word]\r\n\r\n # 5.2 if the len is not in 8, supply:\r\n codeSup = 8 - len(code)%8\r\n code += codeSup * '0'\r\n\r\n # 6. 
create the compressed file\r\n f = open(os.path.splitext(fileName)[0] + '.qlh', 'wb')\r\n\r\n # 6.1 input the codeSup condition\r\n f.write(six.int2byte(codeSup))\r\n\r\n # 6.2 writting the Huffman Code(totalLen+each code len+opposite char+converting inf)\r\n\r\n # 6.2.1 saving total len\r\n f.write(six.int2byte(len(codeDict)))\r\n \r\n for code in codeDict.values():\r\n f.write(six.int2byte(len(code)))\r\n\r\n # 6.2.2 saving each code opposited char\r\n for char in codeDict.keys():\r\n f.write(six.int2byte(ord(char))) \r\n\r\n # 6.3 each 8 len, convert 0-1 to decimal, and match to ASCII\r\n for code in range(len(code)//8):\r\n f.write(six.int2byte(int(code[8* i:8 + 8*i], 2)))\r\n f.flush()\r\n f.close()\r\n\r\n print('Compress success', fileName, '>>', os.path.splitext(fileName)[0]+'compressed.txt')\r\n\r\n\"\"\"\r\nDefine checkBinary Function\r\n\r\n\"\"\"\r\ndef checkBinary(inputData):\r\n\r\n #check the character \r\n outputData = '' # None default data\r\n for word in range(len(inputData)): # TRY IF WE CAN USE NOT INDEX\r\n # if the character exceed 0-255 in ASCII, convert it to ' '\r\n if ord(inputData[word]) >= 256: \r\n outputData += ' '\r\n else:\r\n outputData += inputData[word]\r\n \r\n return outputData\r\n\r\n\r\n\"\"\"\r\nDefine sortTuple function\r\n\r\n\"\"\"\r\ndef sortTuple(dict):\r\n # input dictionary, return it as ascending\r\n return sorted(dict.items(), key= lambda x: x[1], reverse= True)\r\n\r\n\r\n\"\"\"\r\nDefine getCodingSchedule function\r\n\r\n\"\"\"\r\ndef getCodingSchedule(end1, end2, sortList, codeSchedule): # WHAT\r\n if len(end1[0]) == 1:\r\n codeSchedule.setdefault(end1[0], '1')\r\n else:\r\n for key in end1[0]:\r\n codeSchedule[key] = '1' + codeSchedule[key]\r\n if len(end2[0]) == 1:\r\n codeSchedule.setdefault(end2[0], '1')\r\n else:\r\n for key in end2[0]:\r\n codeSchedule[key] = '0' + codeSchedule[key]\r\n sortList.append((end2[0]+end1[0], end1[1]+end2[1]))\r\n return codeSchedule\r\n\r\n\r\n\"\"\"\r\nDefine getKeys function\r\n\r\n\"\"\"\r\ndef getKeys(dict, value):\r\n for k, v in dict.items():\r\n if v == value:\r\n return k\r\n\r\n\r\n\"\"\"\r\nDefine compressAll function\r\n\r\n\"\"\"\r\ndef compressAll(fileNames):\r\n for file in fileNames:\r\n compress(file)\r\n\r\n\r\n\"\"\"\r\nDefine decompressAll function\r\n\r\n\"\"\"\r\ndef decompressAll(fileNames):\r\n for file in fileNames:\r\n decompress(file)\r\n\r\n\r\n\"\"\"\r\nDefine request function\r\n\r\n\"\"\"\r\ndef request():\r\n fileName = tkinter.filedialog.askopenfilenames()\r\n ask = input('Compress or Decompress? (C/D)')\r\n if ask == 'C':\r\n compressAll(fileName)\r\n elif ask == 'D':\r\n decompressAll(fileName)\r\n else:\r\n print('Wrong input')\r\n\r\n\r\n\"\"\"\r\nMain function\r\n\r\n\"\"\"\r\nif __name__ == '__main__':\r\n import tkinter.filedialog\r\n request()\r\n while input('Continue ? 
(Y/N)') == 'Y':\r\n request()\r\n","sub_path":"compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80806641","text":"import json\nimport os\nimport urllib.parse\nimport urllib.request\nfrom botocore.vendored import requests\n\n\ndef respond(message):\n return {\n \"isBase64Encoded\": False,\n \"statusCode\": 200,\n \"headers\": None,\n \"body\": json.dumps(message)\n }\n\n\ndef api_get(endpoint):\n url_address = \"https://api.harvestapp.com/v2/\" + endpoint\n headers = {\n \"User-Agent\": \"LGSS SLA Dashboard (https://www.lgss-digital.co.uk/)\",\n \"Authorization\": \"Bearer \" + os.environ.get(\"HARVEST_ACCESS_TOKEN\"),\n \"Harvest-Account-ID\": os.environ.get(\"HARVEST_ACCOUNT_ID\"),\n \"is_active\": \"true\"\n }\n request = urllib.request.Request(url=url_address, headers=headers)\n response = json.loads(urllib.request.urlopen(request, timeout=5).read().decode(\"utf-8\"))\n total_pages = int(response['total_pages'])\n all_entries = []\n\n page = 1\n while page < (total_pages + 1):\n url = \"https://api.harvestapp.com/v2/\" + endpoint\n if \"?\" in url:\n url += \"&page=\" + str(page)\n else:\n url += \"?page=\" + str(page)\n\n req = urllib.request.Request(url=url, headers=headers)\n res = urllib.request.urlopen(req, timeout=5).read().decode(\"utf-8\")\n all_entries.append(res)\n page += 1\n\n return json.loads(all_entries[0])\n\n\ndef get_cost(project_id):\n json_response = api_get(\"time_entries?project_id=\" + str(project_id))\n\n filtered_response = [x for x in json_response[\"time_entries\"] if\n x['billable'] is True and x['billable_rate'] is not None]\n total = 0.0\n for time in filtered_response:\n total += time['hours'] * time['billable_rate']\n\n return total\n\n\ndef get_projects(search):\n message = []\n json_response = api_get(\"projects\")\n\n filtered_response = [x for x in json_response['projects'] if\n x['notes'] is not None and search in x['notes']]\n\n filtered_response2 = [x for x in filtered_response if\n 'cost_budget' in x and x['cost_budget'] is not None]\n\n for project in filtered_response2:\n total = get_cost(project[\"id\"])\n message.append({\n \"projectName\": project[\"name\"],\n \"budget\": project[\"cost_budget\"],\n \"spend\": total,\n \"budgetRemaining\": project[\"cost_budget\"] - total\n })\n\n return respond(message)\n\n\ndef lambda_handler(event, context):\n search = event['queryStringParameters']['search']\n return get_projects(search)\n","sub_path":"Harvest_Budget_vs_Spending.py","file_name":"Harvest_Budget_vs_Spending.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"100196537","text":"from django.urls import path\nfrom iron_parser import views\n\nurlpatterns = [\n path('remove_all_potential/', views.remove_all_potential),\n path('add_potential/', views.add_potential),\n path('remove_potential/', views.remove_potential),\n path('find_goose/', views.find_goose),\n path('remove_op_goose/', views.remove_op),\n path('parse/', views.parse, name=\"iron_parser_parse\"),\n path('create_goose/', views.create_goose, name=\"iron_parser_create_goose\"),\n path('', views.main, name=\"iron_parser_main\")\n\n]","sub_path":"iron_parser/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160019038","text":"import 
logging\nimport random\nimport praw\nfrom fastapi import APIRouter\nimport pandas as pd\nfrom pydantic import BaseModel, Field, validator\n\nlog = logging.getLogger(__name__)\nrouter = APIRouter()\n\n\nclass Item(BaseModel):\n \"\"\"Use this data model to parse the request body JSON.\"\"\"\n\n x1: float = Field(..., example=3.14)\n x2: int = Field(..., example=-42)\n x3: str = Field(..., example='banjo')\n\n def to_df(self):\n \"\"\"Convert pydantic object to pandas dataframe with 1 row.\"\"\"\n return pd.DataFrame([dict(self)])\n\n @validator('x1')\n def x1_must_be_positive(cls, value):\n \"\"\"Validate that x1 is a positive number.\"\"\"\n assert value > 0, f'x1 == {value}, must be > 0'\n return value\n\n@router.post\n\n@router.post('/predict')\nasync def predict(item: Item):\n \"\"\"Make random baseline predictions for classification problem.\"\"\"\n X_new = item.to_df()\n log.info(X_new)\n y_pred = random.choice([True, False])\n y_pred_proba = random.random() / 2 + 0.5\n return {\n 'title': \"Is Fusion nullified for the Extreme Z Awakening Event?\",\n 'post': \"On JP I missed out on my chance to do SSJ3 Goku the first time so I'm doing it now. Been lucked out of rotations for most of these stages and I've noticed that for my Fusions team, LR Gogeta would NEVER fuse. I'm genuinely curious if the mechanic is nullified for the event or i'm just getting AWFUL RNG.\",\n 'prediction': [\"DBZDokkanBattle\", \"Subreddit2\", \"Subreddit3\", \"Subreddit4\", \"Subreddit5\", \"Subreddit6\", \"Subreddit7\", \"Subreddit8\", \"Subreddit9\", \"Subreddit10\"] \n }","sub_path":"app/routers/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491718442","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\nimport os\n\nfName = input(\"저장할 파일 이름을 입력하세요: \")\n'''\n# pandas\npath = r'C:/Users/LXPER MINI001/Desktop/잡업/고2 2014년 사설'\ndata = pd.read_excel(path)\n\n'''\nwb = Workbook()\nws1 = wb.active\n\nws1.title = '주장'\nws1.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws2 = wb.create_sheet('요지')\nws2.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws3 = wb.create_sheet('주제')\nws3.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws4 = wb.create_sheet('제목')\nws4.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws5 = wb.create_sheet('심경_분위기_변화')\nws5.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws6 = wb.create_sheet('빈칸(구,절)')\nws6.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws7 = wb.create_sheet('빈칸(단어)')\nws7.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws8 = wb.create_sheet('연결사')\nws8.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처'])\nws9 = wb.create_sheet('요약(구,절)')\nws9.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처','요약문'])\nws10 = wb.create_sheet('요약(단어)')\nws10.append(['번호','유형','문제','지문','정답','보기1','보기2','보기3','보기4','보기5','별단어','출처','요약문'])\n\n\nos.chdir(\"C:/Users/LXPER MINI001/Desktop/잡업/고2 2014년 사설\")\nfilelist = os.listdir()\nfor files in filelist:\n if files.endswith(\".xlsx\"):\n\n load_wb = load_workbook(files, data_only=True)\n load_ws = load_wb['Sheet']\n\n\n typeList = []\n for row in load_ws.rows:\n typeList.append(row[1].value)\n\n for row in 
load_ws.iter_rows(min_row=2,max_row=10,min_col=1,max_col=13):\n temp = []\n for cell in row:\n temp.append(cell.value)\n\n print(temp)\n qType = temp[1]\n if(qType == '주장'): ws1.append(temp)\n elif(qType == '요지'):ws2.append(temp)\n elif (qType == '주제'):ws3.append(temp)\n elif (qType == '제목'):ws4.append(temp)\n elif (qType == '심경_분위기_변화'):ws5.append(temp)\n elif (qType == '빈칸(구,절)'):ws6.append(temp)\n elif(qType == '빈칸(단어)'): ws7.append(temp)\n\n # 빈칸(AB) ?\n\n elif (qType == '연결사'):ws8.append(temp)\n elif(qType == '요약(구,절)'):ws9.append(temp)\n elif(qType == '요약(단어)'): ws10.append(temp)\n\n\nwb.save(filename='C:/Users/LXPER MINI001/Desktop/잡업/{}.xlsx'.format(fName))\n","sub_path":"classificate/excel_to_excel.py","file_name":"excel_to_excel.py","file_ext":"py","file_size_in_byte":3242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28121399","text":"# HASH Mode Package\n# Released under MIT License\n# Copyright (c) 2020 TytusDb Team\n\n\nfrom storage.hash import Tabla, serealizar\nimport os, re\n\ntable_name_pattern = \"^[a-zA-Z_][a-zA-Z0-9#@$_]*\"\n\n\nclass BaseDatos:\n \n def __init__(self, Name, main_path):\n self.Name = Name\n self.list_table = []\n self.main_path = main_path\n self.tabla_actual = None\n for tabla in os.listdir(self.main_path):\n temp = tabla.replace(\".bin\",\"\")\n self.list_table.append(temp)\n\n\n # == BUSCAR TABLA\n def Buscar(self, table):\n existe = False\n i = -1\n for tabla in self.list_table:\n if tabla.casefold() == table.casefold():\n existe = True\n i=self.list_table.index(tabla)\n break\n else:\n existe = False\n \n salida = [existe, i]\n return salida\n\n # == DEVUELVE EL OBJETO PARA LA INTERFAZ GRÁFICA\n def Guardar(self):\n serealizar.commit(self.tabla_actual, self.tabla_actual.nombre, self.main_path)\n\n\n def Cargar(self, table):\n try:\n\n if self.tabla_actual.nombre.casefold()==table.casefold():\n return self.tabla_actual\n elif table in self.list_table:\n self.tabla_actual = serealizar.rollback(table, self.main_path)\n return self.tabla_actual\n else:\n return False\n\n except:\n \n if table in self.list_table:\n self.tabla_actual = serealizar.rollback(table, self.main_path)\n return self.tabla_actual\n else:\n return False\n\n # == CREAR TABLAS\n def createTable(self, tableName, numberColumns):\n salida = self.Buscar(tableName)\n if salida[0]:\n return 3\n else:\n try:\n if re.search(table_name_pattern, tableName):\n self.list_table.append(tableName)\n temp = Tabla.Tabla(tableName, numberColumns)\n serealizar.commit(temp, tableName, self.main_path)\n return 0\n else:\n return 1\n except:\n return 1\n\n # == MOSTRAR TABLAS\n def showTables(self):\n return self.list_table\n\n # === EXTRAER INFORMACIÓN\n def extractTable(self, table):\n salida = self.Buscar(table)\n if salida[0]:\n try:\n tabla = self.Cargar(table) \n return tabla.extractTable()\n except:\n return None\n else:\n return None\n \n # === EXTRA Y DEVUELVE LISTA DE ELEMENTOS DE UN RANGO ESPECIFICO \n def extractRangeTable(self, table, columnNumber, lower, upper):\n salida = self.Buscar(table)\n if salida[0]:\n try:\n tabla = self.Cargar(table) \n return tabla.extractRangeTable(columnNumber, lower, upper)\n except:\n return 1\n else:\n return None\n \n # == LLAVES PRIMARIAS Y FORÁNEAS \n def alterAddPK(self, table, columns):\n salida = self.Buscar(table)\n if salida[0]:\n try:\n if len(columns) == 0:\n return 1\n else: \n temp = self.Cargar(table) \n var = temp.alterAddPK(columns)\n self.Guardar()\n return var\n except:\n return 1\n 
else:\n return 3\n\n def alterDropPK(self, table):\n salida = self.Buscar(table)\n if salida[0]:\n try:\n temp = self.Cargar(table) \n var = temp.alterDropPK()\n self.Guardar()\n return var\n except:\n return 1\n else: \n return 3 \n\n # == CAMBIAR NOMBRES\n def alterTable(self, tableOld, tableNew):\n salida = self.Buscar(tableOld)\n if salida[0]:\n try:\n temp = serealizar.rollback(tableOld, self.main_path)\n comprobar = self.Buscar(tableNew)\n if comprobar[0] == False: \n if re.search(table_name_pattern, tableOld) and re.search(table_name_pattern, tableNew):\n os.remove(self.main_path+\"\\\\\"+tableOld+\".bin\")\n self.list_table[salida[1]]= tableNew\n temp.alterTable(tableNew)\n serealizar.commit(temp, tableNew, self.main_path)\n return 0\n else:\n return 1\n\n else:\n return 4\n except:\n return 1\n else:\n return 3\n \n\n# === AGREGAR N-ESIMA COLUMNA\n def alterAddColumn(self, table, default):\n salida = self.Buscar(table)\n if salida[0]:\n try:\n temp = self.Cargar(table) \n var = temp.alterAddColumn(default)\n self.Guardar()\n return var\n except:\n return 1\n else:\n return 3\n\n # === ELIMINAR N-ESIMA COLUMNA\n def alterDropColumn(self, table, columnNumber):\n salida = self.Buscar(table)\n if salida[0]:\n try:\n temp = self.Cargar(table) \n var = temp.alterDropColumn(columnNumber)\n self.Guardar()\n return var\n except:\n return 1\n else:\n return 3\n\n # === ELIMINAR TABLA\n def dropTable(self,tableName):\n salida = self.Buscar(tableName)\n if salida[0]:\n try:\n self.list_table.pop(salida[1])\n os.remove(self.main_path+\"\\\\\"+tableName+\".bin\")\n return 0\n except:\n return 1\n else:\n return 3\n\n \n # === GRAFICAR LAS TABLAS QUE CONTIENE LA BD\n def graficar(self):\n file = open('tablas.dot', \"w\")\n file.write(\"digraph grafica{\" + os.linesep)\n file.write(\"rankdir=LR;\" + os.linesep)\n info = \"{\"\n j = 0\n for i in self.list_table:\n if j == 0:\n info += i+ os.linesep\n else:\n info += \"|\"+i+ os.linesep\n j = j+1\n file.write('tabla[shape=record label=\"'+info+'}\"];')\n file.write(' }' + os.linesep)\n file.close()\n os.system('dot -Tpng tablas.dot -o tablas.png')\n","sub_path":"storage/fase2/team06/storage/hash/BaseDatos.py","file_name":"BaseDatos.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"514509989","text":"import os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test\nimport sys\n\n\ndef _get_src_dir(package_name=None):\n '''Get the source directory by looking in src, or if package_name is\n given, return that combined with the src directory.\n\n '''\n ret = ''\n if 'PY_PACKAGE_NAME' in os.environ:\n ret = os.path.join(BASE_DIR, 'src', os.environ['PY_PACKAGE_NAME'])\n elif package_name:\n ret = os.path.join(BASE_DIR, 'src', package_name)\n if not ret:\n dirs_with_about = [dir for dir in os.listdir('src')\n if '__about__.py' in os.listdir('src/'+dir)]\n if len(dirs_with_about) > 1:\n print('More than one source with __about__.py defined, set PY_PACKAGE_NAME and re-run.')\n exit\n else:\n ret = os.path.join(BASE_DIR, 'src', dirs_with_about.pop())\n return ret\n\n\nBASE_DIR = os.path.dirname(__file__)\nSRC_DIR = _get_src_dir()\nabout = {}\nwith open(os.path.join(SRC_DIR, \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = [os.path.join(BASE_DIR, \"tests\")]\n self.test_suite = True\n\n def run_tests(self):\n # import here, 
since outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nsetup(\n author=about['__author__'],\n author_email=about['__author_email__'],\n classifiers=about['__classifiers__'],\n cmdclass={'test': PyTest},\n description=about['__package_name__'].replace('_', ' ').title(),\n entry_points=about['__entry_points__'],\n install_requires=about['__requires__'],\n license=about['__license__'],\n long_description=about['__desc__'],\n name=about['__package_name__'],\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=['tests', 'tests.*']),\n platforms='any',\n scripts=about['__scripts__'],\n tests_require=['pytest'],\n url=about['__url__'],\n version=about['__version__'],\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445893718","text":"import numpy as np\nimport time\nfrom numba import jit\n\nRISKFREE = 0.02\nVOLATILITY = 0.30\n\n\n# -----------------------------------------------------------------------------\n# Modify the functions cnd, and black_scholes to use Numba and make them run as\n# fast as possible.\n# -----------------------------------------------------------------------------\n\n\n@jit(nopython=True)\ndef cnd(d):\n A1 = 0.31938153\n A2 = -0.356563782\n A3 = 1.781477937\n A4 = -1.821255978\n A5 = 1.330274429\n RSQRT2PI = 0.39894228040143267793994605993438\n K = 1.0 / (1.0 + 0.2316419 * np.abs(d))\n ret_val = (RSQRT2PI * np.exp(-0.5 * d * d) *\n (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))\n if d > 0:\n ret_val = 1.0 - ret_val\n return ret_val\n\n\n@jit(nopython=True)\ndef black_scholes(callResult, putResult, stockPrice, optionStrike, optionYears,\n Riskfree, Volatility):\n\n R = Riskfree\n V = Volatility\n Vsqr05 = 0.5 * V * V\n\n for i in range(len(stockPrice)):\n S = stockPrice[i]\n X = optionStrike[i]\n T = optionYears[i]\n sqrtT = np.sqrt(T)\n\n d1 = (np.log(S / X) + (R + Vsqr05) * T) / (V * sqrtT)\n d2 = d1 - V * sqrtT\n cndd1 = cnd(d1)\n cndd2 = cnd(d2)\n expRT = np.exp(- R * T)\n\n callResult[i] = (S * cndd1 - X * expRT * cndd2)\n putResult[i] = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1))\n\n\n# -----------------------------------------------------------------------------\n# Reference implementation - do not modify this. 
It is used to compare the\n# output and performance of your modified version.\n# -----------------------------------------------------------------------------\n\n\ndef cnd_reference(d):\n A1 = 0.31938153\n A2 = -0.356563782\n A3 = 1.781477937\n A4 = -1.821255978\n A5 = 1.330274429\n RSQRT2PI = 0.39894228040143267793994605993438\n K = 1.0 / (1.0 + 0.2316419 * np.abs(d))\n ret_val = (RSQRT2PI * np.exp(-0.5 * d * d) *\n (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))))\n return np.where(d > 0, 1.0 - ret_val, ret_val)\n\n\ndef black_scholes_reference(callResult, putResult, stockPrice, optionStrike,\n optionYears, Riskfree, Volatility):\n S = stockPrice\n X = optionStrike\n T = optionYears\n R = Riskfree\n V = Volatility\n sqrtT = np.sqrt(T)\n d1 = (np.log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT)\n d2 = d1 - V * sqrtT\n cndd1 = cnd_reference(d1)\n cndd2 = cnd_reference(d2)\n\n expRT = np.exp(- R * T)\n callResult[:] = (S * cndd1 - X * expRT * cndd2)\n putResult[:] = (X * expRT * (1.0 - cndd2) - S * (1.0 - cndd1))\n\n\n# -----------------------------------------------------------------------------\n# For execution of the test\n# -----------------------------------------------------------------------------\n\n\ndef randfloat(rand_var, low, high):\n return (1.0 - rand_var) * low + rand_var * high\n\n\ndef main (*args):\n OPT_N = 4000000\n iterations = 10\n if len(args) >= 1:\n iterations = int(args[0])\n \n stockPrice = randfloat(np.random.random(OPT_N), 5.0, 30.0)\n optionStrike = randfloat(np.random.random(OPT_N), 1.0, 100.0)\n optionYears = randfloat(np.random.random(OPT_N), 0.25, 10.0)\n\n callResultOptimised = np.zeros(OPT_N)\n putResultOptimised = -np.ones(OPT_N)\n \n time0 = time.time()\n for i in range(iterations):\n black_scholes(callResultOptimised, putResultOptimised, stockPrice,\n optionStrike, optionYears, RISKFREE, VOLATILITY)\n time1 = time.time()\n print(\"Optimised time: %f msec\" % ((time1 - time0) / iterations * 1000))\n\n callResultReference = np.zeros(OPT_N)\n putResultReference = -np.ones(OPT_N)\n \n time0 = time.time()\n for i in range(iterations):\n black_scholes_reference(callResultReference, putResultReference,\n stockPrice, optionStrike, optionYears,\n RISKFREE, VOLATILITY)\n time1 = time.time()\n print(\"Reference time: %f msec\" % ((time1 - time0) / iterations * 1000))\n\n delta = np.abs(callResultOptimised - callResultReference)\n L1norm = delta.sum() / np.abs(callResultReference).sum()\n print(\"L1 norm: %E\" % L1norm)\n print(\"Max absolute error: %E\" % delta.max())\n np.testing.assert_allclose(callResultOptimised, callResultReference)\n\n\nif __name__ == \"__main__\":\n import sys\n main(*sys.argv[1:])\n","sub_path":"pycon-uk-2015/example_codes/blackscholes_solution.py","file_name":"blackscholes_solution.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"226880366","text":"import os\nimport numpy as np\nimport time\nimport datetime\nimport torch\nimport torchvision\nimport gc\nfrom torch import optim\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom evaluation import *\nfrom loss_function import *\nfrom network import DCU_Net_16, DCGRU_Net_16, DCGRU_Net_22, LDCGRU_Net_16, DCRN_Net_10\nimport csv\nfrom istft import ISTFT\nfrom utils.signalprocess import analysis_window\n\n\ndef wSDRLoss(mixed, clean, clean_est, eps=2e-7):\n # Used on signal level(time-domain). 
Backprop-able istft should be used.\n    # Batched audio inputs shape (N x T) required.\n    bsum = lambda x: torch.sum(x, dim=1) # Batch preserving sum for convenience.\n\n    def mSDRLoss(orig, est):\n        # Modified SDR loss, <x, x`> / (||x|| * ||x`||) : L2 Norm.\n        # Original SDR Loss: <x, x`>**2 / (<x`, x`> == ||x`||**2)\n        # > Maximize Correlation while producing minimum energy output.\n        correlation = bsum(orig * est)\n        energies = torch.norm(orig, p=2, dim=1) * torch.norm(est, p=2, dim=1)\n        return -(correlation / (energies + eps))\n\n    noise = mixed - clean\n    noise_est = mixed - clean_est\n    a = bsum(clean ** 2) / (bsum(clean ** 2) + bsum(noise ** 2) + eps)\n    wSDR = a * mSDRLoss(clean, clean_est) + (1 - a) * mSDRLoss(noise, noise_est)\n    return torch.mean(wSDR)\n\n\nclass Solver(object):\n    def __init__(self, config, train_reader, valid_reader):\n        # Data reader\n        self.train_reader = train_reader\n        self.valid_reader = valid_reader\n        # Models\n        self.unet = None\n        self.optimizer = None\n        self.img_ch = config.img_ch\n        self.output_ch = config.output_ch\n        self.criterion = torch.nn.MSELoss()\n        # Hyper-parameters\n        self.lr = config.lr\n        self.beta1 = config.beta1\n        self.beta2 = config.beta2\n        ## User-parameters\n        self.half_lr = config.half_lr\n        self.prev_cv_loss = float(\"inf\")\n        self.best_cv_loss = float(\"inf\")\n        self.best_tr_loss = float(\"inf\")\n        self.having = False\n        # Training settings\n        self.num_epochs = config.num_epochs\n        self.num_epochs_decay = config.num_epochs_decay\n        self.batch_size = config.batch_size\n        # Step size\n        self.log_step = config.log_step\n        self.val_step = config.val_step\n        # Path\n        self.model_path = config.model_path\n        self.result_path = config.result_path\n        self.mode = config.mode\n        # Device\n        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n        # self.device = torch.device('cpu')\n        # Model\n        self.model_type = config.model_type\n        self.t = config.t\n        self.build_model()\n\n    def build_model(self):\n        \"\"\"Build the network and its optimizer.\"\"\"\n        if self.model_type == 'DCU_Net_16':\n            self.unet = DCU_Net_16()\n        elif self.model_type == 'DCGRU_Net_16':\n            self.unet = DCGRU_Net_16()\n        elif self.model_type == 'DCGRU_Net_22':\n            self.unet = DCGRU_Net_22()\n        elif self.model_type == 'LDCGRU_Net_16':\n            self.unet = LDCGRU_Net_16()\n        elif self.model_type == 'DCRN_Net_10':\n            self.unet = DCRN_Net_10()\n\n        self.optimizer = optim.Adam(list(self.unet.parameters()),\n                                    self.lr, [self.beta1, self.beta2])\n        self.unet.to(self.device)\n\n        # self.print_network(self.unet, self.model_type)\n\n    def print_network(self, model, name):\n        \"\"\"Print out the network information.\"\"\"\n        num_params = 0\n        for p in model.parameters():\n            num_params += p.numel()\n        print(model)\n        print(name)\n        print(\"The number of parameters: {}\".format(num_params))\n\n    def reset_grad(self):\n        \"\"\"Zero the gradient buffers.\"\"\"\n        self.unet.zero_grad()\n\n    def tensor2img(self, x):\n        img = (x[:, 0, :, :] > x[:, 1, :, :]).float()\n        img = img * 255\n        return img\n\n    def train(self):\n        \"\"\"Train the network, with periodic cross-validation.\"\"\"\n        # ====================================== Training ===========================================#\n        # ===========================================================================================#\n        # Net Train\n        lr = self.lr\n        start_epoch = -1\n        if not os.path.isdir(\"./models/checkpoint\"):\n            os.mkdir(\"./models/checkpoint\")\n        checkpoint_path = './models/checkpoint/%s_ckpt_best.pth' % self.model_type # checkpoint path\n        if os.path.exists(checkpoint_path):\n            checkpoint = torch.load(checkpoint_path) # load checkpoint\n            
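# restoring both the model weights and the optimizer state lets training resume from the saved epoch\n            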
self.unet.load_state_dict(checkpoint['net']) # load the model's learnable parameters\n            self.optimizer.load_state_dict(checkpoint['optimizer']) # load the optimizer state\n            start_epoch = checkpoint['epoch']\n\n        for epoch in range(start_epoch + 1, self.num_epochs):\n            self.unet.train(True)\n            train_loss = 0\n            start = time.time()\n            # for i, (tinput_featr, tinput_feati, toutput_feat, des_sig, tar_sig) in enumerate(self.train_reader):\n            while self.train_reader.is_running_out() is not True:\n                # print(self.train_reader.next_consume_idx)\n                tinput_featr, tinput_feati, toutput_feat, des_sig, tar_sig = self.train_reader.next_batch()\n                input_featr = torch.tensor(tinput_featr, dtype=torch.float32).to(self.device)\n                input_feati = torch.tensor(tinput_feati, dtype=torch.float32).to(self.device)\n                clean_feat = torch.tensor(toutput_feat, dtype=torch.float32).to(self.device)\n                # network forward processing: every supported model type\n                # (DCU_Net_16, DCGRU_Net_16, DCGRU_Net_22, LDCGRU_Net_16, DCRN_Net_10)\n                # shares the same forward pass and MSE loss on the flattened spectrogram\n                clean_flat = clean_feat.view(clean_feat.size(0), -1)\n                estimate_real, estimate_imag = self.unet(input_featr, input_feati)\n                estimate_feat = torch.cat([estimate_real, estimate_imag], 1)\n                estimate_flat = estimate_feat.view(estimate_feat.size(0), -1)\n                loss = self.criterion(estimate_flat, clean_flat)\n\n                train_loss += loss.item()\n                # Backprop + optimize\n                self.reset_grad()\n                loss.backward()\n                self.optimizer.step()\n\n            # Print the log info\n            gc.collect()\n            self.train_reader.reset()\n            print(\"[Training] [%d/%d], Elapsed Time: %4f, Train Loss: %4f\" % (\n                epoch + 1, self.num_epochs, time.time() - start, train_loss))\n            if train_loss < self.best_tr_loss:\n                self.best_tr_loss = train_loss\n                best_epoch = epoch\n                best_unet_path = os.path.join(self.model_path, '%s-%d-%d-train.pkl' % (\n                    self.model_type, self.num_epochs, best_epoch))\n                best_unet = self.unet.state_dict()\n                print('Found better train model, Best %s model loss : %.4f' % (self.model_type, train_loss))\n                torch.save(best_unet, best_unet_path)\n                checkpoint = {\n                    \"net\": self.unet.state_dict(),\n                    'optimizer': self.optimizer.state_dict(),\n                    \"epoch\": epoch\n                }\n                torch.save(checkpoint, checkpoint_path)\n\n            # ===================================== Cross Validation =================================#\n            start = time.time()\n            
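# evaluation mode: train(False)/eval() make layers such as dropout or batch-norm (if present) behave deterministically\n            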
self.unet.train(False)\n            self.unet.eval()\n            valid_loss = 0\n            for i, (tinput_featr, tinput_feati, toutput_feat, des_sig, tar_sig) in enumerate(self.valid_reader):\n                input_featr = torch.tensor(tinput_featr, dtype=torch.float32).to(self.device)\n                input_feati = torch.tensor(tinput_feati, dtype=torch.float32).to(self.device)\n                clean_feat = torch.tensor(toutput_feat, dtype=torch.float32).to(self.device)\n                # network forward processing: the same shared loss as in training\n                clean_flat = clean_feat.view(clean_feat.size(0), -1)\n                estimate_real, estimate_imag = self.unet(input_featr, input_feati)\n                estimate_feat = torch.cat([estimate_real, estimate_imag], 1)\n                estimate_flat = estimate_feat.view(estimate_feat.size(0), -1)\n                loss = self.criterion(estimate_flat, clean_flat)\n\n                valid_loss += loss.item()\n\n            print('[Validation] [%d/%d], Elapsed Time: %4f, Validation Loss: %.4f' % (\n                epoch + 1, self.num_epochs, time.time() - start, valid_loss))\n            # Decay learning rate\n            if (epoch + 1) > (self.num_epochs - self.num_epochs_decay):\n                lr -= (self.lr / float(self.num_epochs_decay))\n                for param_group in self.optimizer.param_groups:\n                    param_group['lr'] = lr\n                print('Decay learning rate to lr: {}.'.format(lr))\n\n            if valid_loss < self.best_cv_loss:\n                self.best_cv_loss = valid_loss\n                best_epoch = epoch\n                best_unet_path = os.path.join(self.model_path, '%s-%d-%d-valid.pkl' % (\n                    self.model_type, self.num_epochs, best_epoch))\n                best_unet = self.unet.state_dict()\n                print('Found better valid model, Best %s model loss : %.4f\\n' % (self.model_type, valid_loss))\n                torch.save(best_unet, best_unet_path)\n","sub_path":"DCGRU_Net_22-master/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":12607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246488506","text":"# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pytest\n\nimport cirq\nimport cirq.contrib.acquaintance as 
cca\nimport cirq.contrib.routing as ccr\n\n\n@pytest.mark.parametrize(\n    'circuit,device_graph,algo', [(cirq.testing.random_circuit(\n        10, 30, 0.5), ccr.get_grid_device_graph(4, 3), algo)\n                                  for algo in ccr.ROUTERS\n                                  for _ in range(5)] +\n    [(cirq.Circuit(), ccr.get_grid_device_graph(4, 3), 'greedy')])\ndef test_route_circuit(circuit, device_graph, algo):\n    swap_network = ccr.route_circuit(circuit, device_graph, algo_name=algo)\n    assert set(swap_network.initial_mapping).issubset(device_graph)\n    assert (sorted(swap_network.initial_mapping.values()) == sorted(\n        circuit.all_qubits()))\n    assert ccr.ops_are_consistent_with_device_graph(\n        swap_network.circuit.all_operations(), device_graph)\n    assert ccr.is_valid_routing(circuit, swap_network)\n\n\n@pytest.mark.parametrize(\n    'circuit,device_graph,algo,make_bad', [(cirq.testing.random_circuit(\n        4, 8, 0.5), ccr.get_grid_device_graph(3, 2), algo, make_bad)\n                                           for algo in ccr.ROUTERS\n                                           for make_bad in (False, True)\n                                           for _ in range(5)] +\n    [(cirq.Circuit(), ccr.get_grid_device_graph(3, 2), 'greedy', False)])\ndef test_route_circuit_via_unitaries(circuit, device_graph, algo, make_bad):\n    swap_network = ccr.route_circuit(circuit, device_graph, algo_name=algo)\n\n    logical_qubits = sorted(circuit.all_qubits())\n    if len(logical_qubits) < 2:\n        return\n    reverse_mapping = {l: p for p, l in swap_network.initial_mapping.items()}\n    physical_qubits = [reverse_mapping[l] for l in logical_qubits]\n    physical_qubits += list(set(device_graph).difference(physical_qubits))\n    n_unused_qubits = len(physical_qubits) - len(logical_qubits)\n\n    if make_bad:\n        swap_network.circuit += [cirq.CNOT(*physical_qubits[:2])]\n    cca.return_to_initial_mapping(swap_network.circuit)\n\n    logical_unitary = circuit.unitary(qubit_order=logical_qubits)\n    logical_unitary = np.kron(logical_unitary, np.eye(1 << n_unused_qubits))\n    physical_unitary = swap_network.circuit.unitary(qubit_order=physical_qubits)\n\n    assert ccr.is_valid_routing(circuit, swap_network) == (not make_bad)\n    assert np.allclose(physical_unitary, logical_unitary) == (not make_bad)\n\n\ndef test_router_bad_args():\n    circuit = cirq.Circuit()\n    device_graph = ccr.get_linear_device_graph(5)\n    with pytest.raises(ValueError):\n        ccr.route_circuit(circuit, device_graph)\n\n    algo_name = 'greedy'\n    with pytest.raises(ValueError):\n        ccr.route_circuit(circuit,\n                          device_graph,\n                          algo_name=algo_name,\n                          router=ccr.ROUTERS[algo_name])\n\n    circuit = cirq.Circuit(cirq.CCZ(*cirq.LineQubit.range(3)))\n    with pytest.raises(ValueError):\n        ccr.route_circuit(circuit, device_graph, algo_name=algo_name)\n\n    circuit = cirq.Circuit(\n        cirq.CZ(cirq.LineQubit(i), cirq.LineQubit(i + 1)) for i in range(5))\n    with pytest.raises(ValueError):\n        ccr.route_circuit(circuit, device_graph, algo_name=algo_name)\n","sub_path":"cirq/contrib/routing/router_test.py","file_name":"router_test.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"451242638","text":"from typing import List\nfrom collections import defaultdict\n\nclass Solution:\n    def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n        graph = defaultdict(list)\n        for prerequisite in prerequisites:\n            graph[prerequisite[0]].append(prerequisite[1])\n        visited = [0]*numCourses\n        def dfs(course):\n            # returns True if a cycle is found; 1 = in current DFS path, 2 = fully explored\n            if visited[course] == 1: return True\n            if visited[course] == 2: return False\n            \n            visited[course] = 1\n            for prereq in graph[course]:\n                if dfs(prereq): return True\n            visited[course] = 2\n            return False\n        for course in list(graph.keys()):\n            if dfs(course): return 
False\n return True\n ","sub_path":"207. Course Schedule.py","file_name":"207. Course Schedule.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"98419272","text":"import neuralnetworksA2 as nn\nimport numpy as np\nimport pandas as pd\nimport pprint as pp\nimport matplotlib.pyplot as plt\n\nclass NeuralNetworkReLU(nn.NeuralNetwork):\n\tdef __init__(self, ni, nh, no):\n\t\tsuper(NeuralNetworkReLU, self).__init__(ni, nh, no)\n\n\tdef activation(self, weighted_sum):\n\t\treturn np.maximum(0, weighted_sum)\n\n\tdef activationDerivative(self, activation_value):\n\t\tactDer = np.copy(activation_value)\n\t\tactDer[actDer <= 0] = 0\n\t\tactDer[actDer > 0] = 1\n\t\treturn actDer\n\ndef partition(X, T, fraction, shuffle):\n nRows = X.shape[0]\n nTrain = int(round(fraction*nRows)) \n nTest = nRows - nTrain\n\n rows = np.arange(nRows)\n\n if(shuffle == True):\n np.random.shuffle(rows)\n\n trainIndices = rows[:nTrain]\n testIndices = rows[nTrain:]\n\n Xtrain = X[trainIndices, :]\n Ttrain = T[trainIndices, :]\n Xtest = X[testIndices, :]\n Ttest = T[testIndices, :]\n \n return Xtrain, Ttrain, Xtest, Ttest\n\ndef rmse(A, B):\n return np.sqrt(np.mean((A - B)**2))\n\nif __name__ == '__main__':\n\t\n\t# Load the csv data.\n\tdframe = pd.read_csv('energydata_complete.csv', sep=',',header=None)\n\t# Filter out required columns.\n\t#dframe = dframe.drop(dframe.columns[[0, -2, -1]], axis=1)\n\n\t# Get target.\n\tTd = dframe.iloc[1:, [1]]\n\tTd = Td.as_matrix()\n\tT = Td.astype(float)\n\n\t# Get input.\n\tXd = dframe.iloc[1:, 2:-2]\n\tXd = Xd.as_matrix()\n\tX = Xd.astype(float)\n\t\n\t# Smaller dataset\n\tX = np.arange(5).reshape((-1,1))\n\tT = np.sin(X)\n\n\t# Comparision\n\t#hiddenLayers = [[u]*nl for u in [1, 2, 5, 10, 50] for nl in [1, 2, 3, 4, 5, 10]]\n\thiddenLayers = [[1], [1,1], [5], [5,5], [10]]\n\ttanHlist = []\n\tReLUlist = []\n\tfor actFun in [nn.NeuralNetwork, NeuralNetworkReLU]:\n\t\tfor hidden in hiddenLayers:\n\t\t\t# Create list for storing RMSE.\n\t\t\trmseTrainList = []\n\t\t\trmseTestList = [] \n\t\t\tfor i in range(10):\n\t\t\t\tXtrain, Ttrain, Xtest, Ttest = partition(X, T, 0.8, shuffle = False)\n\t\t\t\tnnet = actFun(Xtrain.shape[1], hidden, Ttrain.shape[1])\n\t\t\t\tnnet.train(Xtrain, Ttrain, 100)\n\t\t\t\trmseTrain = rmse(Ttrain, nnet.use(Xtrain))\n\t\t\t\trmseTest = rmse(Ttest, nnet.use(Xtest))\n\t\t\t\trmseTrainList.append(rmseTrain)\n\t\t\t\trmseTestList.append(rmseTest)\n\t\t\trmseTrainMean = sum(rmseTrainList)/len(rmseTrainList)\n\t\t\trmseTestMean = sum(rmseTestList)/len(rmseTestList)\n\t\t\tif(actFun == nn.NeuralNetwork):\n\t\t\t\ttanHlist.append([hidden, rmseTrainMean, rmseTestMean])\n\t\t\telse:\n\t\t\t\tReLUlist.append([hidden, rmseTrainMean, rmseTestMean])\n\t\n\tprint(\"\\n\\n 1. tanH:\")\n\ttanHlist = pd.DataFrame(tanHlist)\n\tpp.pprint(tanHlist)\n\t\n\tprint(\"\\n\\n 2. 
ReLUlist:\")\n\tReLUlist = pd.DataFrame(ReLUlist)\n\tpp.pprint(ReLUlist)\n\n\tplt.figure(figsize = (10, 10))\n\tplt.plot(tanHlist.values[:, 1], 'b', label = 'tanH Train RMSE')\n\tplt.plot(tanHlist.values[:, 2], 'g', label = 'tanH Test RMSE')\n\tplt.plot(ReLUlist.values[:, 1], 'm', label = 'ReLU Train RMSE')\n\tplt.plot(ReLUlist.values[:, 2], 'k', label = 'ReLU Test RMSE')\n\t#plt.plot(tanHlist.values[:, 1:], 'o-')\n\t#plt.plot(ReLUlist.values[:, 1:], 'o-')\n\tplt.legend(('tanh Train RMSE', 'tanh Test RMSE', 'ReLU Train RMSE', 'ReLU Test RMSE'))\t\n\tplt.xticks(range(tanHlist.shape[0]), hiddenLayers, rotation=30, horizontalalignment='right')\n\tplt.grid(True)\n\tplt.show()\n\n\n","sub_path":"Assignment3.py","file_name":"Assignment3.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"325205509","text":"import act\nimport glob\nimport json\nimport xarray as xr\nimport numpy as np\n\n# Read in ARM Live Data Webservice Token and Username\nwith open('./token.json') as f:\n data = json.load(f)\nusername = data['username']\ntoken = data['token']\n\n# Specify dictionary of datastreams, variables, and weights\ncf_ds = {'sgpmetE13.b1': {'variable': ['tbrg_precip_total', 'org_precip_rate_mean',\n 'pwd_precip_rate_mean_1min']},\n 'sgpvdisC1.b1': {'variable': ['rain_rate']},\n 'sgpvdisE13.b1': {'variable': ['rain_rate']},\n 'sgpdisdrometerC1.b1': {'variable': ['rain_rate']},\n 'sgpdisdrometerE13.b1': {'variable': ['rain_rate']},\n 'sgpstamppcpE13.b1': {'variable': ['precip']},\n 'sgpwbpluvio2C1.a1': {'variable': ['intensity_rtnrt']},\n 'sgpldE13.b1': {'variable': ['precip_rate']},\n 'sgpldC1.b1': {'variable': ['precip_rate']},\n 'sgpaosmetE13.a1': {'variable': ['rain_intensity']},\n 'sgpmwr3cC1.b1': {'variable': ['rain_intensity']}\n }\n\n# Specify date for analysis\nstartdate = '2017-01-01'\nenddate = '2019-12-31'\nsdate = ''.join(startdate.split('-'))\nedate = ''.join(enddate.split('-'))\ndays = act.utils.datetime_utils.dates_between(sdate, edate)\n\ndays = sorted(days)\n\n# Run through each day, convert precip to same units, combine all precip\n# into one object and write out to netcdf if more than 5 instruments are\n# recording precipitation.\n# Note the disdrometers routinely record precip in high wind conditions\n# and that was the reason for upping the threshold\nout_units = 'mm/hr'\nfor d in days:\n temp = False\n arm_d = d.strftime('%Y%m%d')\n d = d.strftime('%Y-%m-%d')\n precip = xr.Dataset()\n vmax = []\n\n # Run through each datastream\n for ds in cf_ds:\n # if data not available try and download\n files = glob.glob(''.join(['./', ds, '/*'+d+'*cdf']))\n if len(files) == 0:\n files = glob.glob(''.join(['./', ds, '/*'+d+'*nc']))\n if len(files) == 0:\n try:\n result = act.discovery.download_data(username, token, ds, d, d)\n except:\n pass\n files = glob.glob(''.join(['./', ds, '/*'+arm_d+'*cdf']))\n if len(files) == 0:\n files = glob.glob(''.join(['./', ds, '/*'+arm_d+'*nc']))\n if len(files) == 0:\n continue\n\n try:\n obj = act.io.armfiles.read_netcdf(files)\n except:\n continue\n\n # Run through each variable\n for v in cf_ds[ds]['variable']:\n da = obj[v]\n # Convert units and add to dataarray list\n units = da.attrs['units']\n if units == 'mm':\n da.attrs['units'] = 'mm/min'\n da.values = act.utils.data_utils.convert_units(da.values, da.attrs['units'], out_units)\n da.attrs['units'] = out_units\n da = da.resample(time='1min').mean()\n precip['_'.join([ds, v])] = da\n\n # Add temperature data\n 
if ds == 'sgpaosmetE13.a1' and temp is False:\n precip['temp_mean'] = obj['temperature_ambient'].resample(time='1min').mean()\n temp = True\n if ds == 'sgpmetE13.b1' and temp is False:\n precip['temp_mean'] = obj['temp_mean']\n temp = True\n\n da.close()\n obj.close()\n\n # Only use data when temperature above freezing\n precip = precip.where(precip['temp_mean'] > 0)\n vmax = [np.nanmax(precip[v].values) for v in precip]\n\n # Count number of instruments recording precip\n vsum = sum(i > 0 for i in vmax)\n if vsum > 5:\n precip.to_netcdf('./sgpprecip/sgpprecip.' + arm_d + '.nc')\n","sub_path":"parse_data.py","file_name":"parse_data.py","file_ext":"py","file_size_in_byte":3748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"626261841","text":"import os\nimport numpy as np\nfrom pathlib import Path\nfrom src.params.params import *\n\ndef save_data(data_out, subfolder=\"\", prefix=\"\", perturb_position=None,\n args=None):\n \"\"\"Save the data to disc.\"\"\"\n\n if args is None:\n print('Please supply an argument dictionary to save data.')\n exit()\n \n \n # Prepare variables to be used when saving\n n_data = data_out.shape[0]\n temp_time_to_run = \"{:.2e}\".format(args['time_to_run'])\n temp_forcing = \"{:.1f}\".format(args['forcing'])\n temp_ny = \"{:.2e}\".format(args['ny'])\n\n if len(subfolder) == 0:\n expected_name = f\"ny{temp_ny}_t{temp_time_to_run}\" +\\\n f\"_n_f{n_forcing}_f{temp_forcing}\"\n expected_path = f'./data/{expected_name}'\n \n # See if folder is present\n dir_exists = os.path.isdir(expected_path)\n\n if not dir_exists:\n os.mkdir(expected_path)\n \n subfolder = expected_name\n else:\n # Check if path exists\n expected_path = f'./data/{subfolder}'\n dir_exists = os.path.isdir(expected_path)\n\n if not dir_exists:\n os.mkdir(expected_path)\n\n\n\n if args['ref_run']:\n ref_header_extra = f\", rec_id={args['record_id']}\"\n subsubfolder = 'ref_data'\n # Check if path exists\n expected_path = f'./data/{subfolder}/{subsubfolder}'\n dir_exists = os.path.isdir(expected_path)\n\n # Make dir if not present\n if not dir_exists:\n os.mkdir(expected_path)\n\n prefix = \"ref_\"\n\n ref_filename_extra = f\"_rec{args['record_id']}\"\n\n ref_data_info_name = f\"data/{subfolder}/{subsubfolder}/ref_data_info_ny\"+\\\n f\"{temp_ny}_t{temp_time_to_run}\"+\\\n f\"_n_f{n_forcing}_f{temp_forcing}.txt\"\n info_line = f\"f={args['forcing']}, n_f={n_forcing}, n_ny={args['ny_n']}, \" +\\\n f\"ny={args['ny']}, time={args['time_to_run']}, dt={dt}, epsilon={epsilon}, \" +\\\n f\"lambda={lambda_const}, n_records={args['n_records']}, \" +\\\n f\"burn_in_time={args['burn_in_time']}, \" +\\\n f\"record_max_time={args['record_max_time']}, \" +\\\n f\"sample_rate={sample_rate}\"\n with open(ref_data_info_name, 'w') as file:\n file.write(info_line)\n\n else:\n ref_header_extra = \"\"\n ref_filename_extra = \"\"\n subsubfolder = args['perturb_folder']\n\n # Check if path exists\n expected_path = f'./data/{subfolder}/{subsubfolder}'\n dir_exists = os.path.isdir(expected_path)\n\n # Make dir if not present\n if not dir_exists:\n os.mkdir(expected_path)\n \n header = f\"f={args['forcing']}, n_f={n_forcing}, n_ny={args['ny_n']}, ny={args['ny']}, \" +\\\n f\"time={args['time_to_run']}, dt={dt}, epsilon={epsilon}, \" +\\\n f\"lambda={lambda_const}, N_data={n_data}, \" +\\\n f\"sample_rate={sample_rate}\" + ref_header_extra\n \n if perturb_position is not None:\n header += f', perturb_pos={int(perturb_position)}'\n\n # Save data\n 
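# np.savetxt writes the header string as '# '-prefixed comment lines, so the run parameters travel with the CSV\n    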
np.savetxt(f\"data/{subfolder}/{subsubfolder}/{prefix}udata_ny{temp_ny}_t{temp_time_to_run}\"+\n f\"_n_f{n_forcing}_f{temp_forcing}{ref_filename_extra}.csv\",\n data_out, delimiter=\",\", header=header)\n \ndef save_perturb_info(args=None):\n \"\"\"Save info textfile about the perturbation runs\"\"\"\n\n temp_time_to_run = \"{:.2e}\".format(args['time_to_run'])\n temp_forcing = \"{:.1f}\".format(args['forcing'])\n temp_ny = \"{:.2e}\".format(args['ny'])\n\n # Prepare filename\n perturb_data_info_name = Path(args['path'], args['perturb_folder'], \n f\"perturb_data_info_ny{temp_ny}_t{temp_time_to_run}\"+\\\n f\"_n_f{n_forcing}_f{temp_forcing}.txt\")\n\n # Check if path already exists\n dir_exists = os.path.isdir(perturb_data_info_name)\n if dir_exists:\n return\n \n print('Saving perturb data info textfile\\n')\n\n # Prepare line to write\n info_line = f\"f={args['forcing']}, n_f={n_forcing}, n_ny={args['ny_n']}, \" +\\\n f\"ny={args['ny']}, time={args['time_to_run']}, dt={dt}, epsilon={epsilon}, \" +\\\n f\"lambda={lambda_const}, \" +\\\n f\"burn_in_time={args['burn_in_time']}, \" +\\\n f\"sample_rate={sample_rate}, eigen_perturb={args['eigen_perturb']}, \"+\\\n f\"seed_mode={args['seed_mode']}, \"+\\\n f\"single_shell_perturb={args['single_shell_perturb']}, \"+\\\n f\"start_time_offset={args['start_time_offset']}\"\n \n # Write to file\n with open(str(perturb_data_info_name), 'w') as file:\n file.write(info_line)\n","sub_path":"utils/save_data_funcs.py","file_name":"save_data_funcs.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"295234972","text":"\"\"\"oms URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom order import views as order_views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('',order_views.home,name='home'),\n    path('login/',order_views.loginuser,name='login'),\n    path('register/', order_views.registeruser,name='register'),\n    path('logout/', order_views.logoutuser,name='logout'),\n    path('profile/', order_views.userprofile,name='profile'),\n    path('changepassword/', order_views.ChangePassword.as_view(template_name = 'order/changepassword.html'),name='changepassword'),\n\n    path('products/',order_views.products,name='products'),\n    path('createorder/',order_views.createOrder,name=\"createOrder\"),\n    path('updateorder//',order_views.updateOrder,name='updateOrder'),\n    path('deleteorder//',order_views.deleteOrder,name='deleteOrder'),\n    path('checkhome/',order_views.checkhome,name='dash'),\n    path('searchorder/',order_views.search,name='searchorder'),\n    \n]\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n","sub_path":"oms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537360696","text":"# A Simple Dictionary\nalien_0 = {'color':'green', 'points':5} #{key:value, key:value}\nprint(alien_0)\nprint(alien_0['color'])\nprint(alien_0['points'])\n\n# Adding new Key-Value Pairs to Existing Dictionary\nalien_0['x_position'] = 0\nalien_0['y_position'] = 25\nprint(alien_0)\n\n# You can also start with empty dictionaries\nalien_0 = {}\nalien_0['color'] = 'green'\nalien_0['points'] = 5\nprint(alien_0)\n\n# Modifying Values in a Dictionary\nalien_0 = {'color':'green'}\nprint(f\"The alien is {alien_0['color']}.\")\n\nalien_0['color'] = 'yellow'\nprint(f\"The alien is now {alien_0['color']}.\")\n\n# Tracking the Alien's movement.\nalien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}\nprint(f\"Original position: {alien_0['x_position']}\")\n\n# Move the alien to the right.\n# Determine how far to move the alien based on its current speed.\nif alien_0['speed'] == 'slow':\n    x_increment = 1\nelif alien_0['speed'] == 'medium':\n    x_increment = 2\nelse:\n    # This must be a fast alien\n    x_increment = 3\nalien_0['x_position'] = alien_0['x_position'] + x_increment\nprint(f\"New position: {alien_0['x_position']}\")\n\n# Removing Key-Value Pairs\nalien_0 = {'color':'green', 'points':5}\nprint(alien_0)\ndel alien_0['points']\nprint(alien_0)\n\n# A Dictionary of Similar objects\nfavorite_languages = {'jen':'python',\n                      'sarah':'c',\n                      'edward':'ruby',\n                      'phil':'python'}\nlanguage = favorite_languages['sarah'].title()\nprint(f\"Sarah's favorite language is {language}\")\n\n# using get() to Access Values\nalien_0 = {'color':'green', 'speed':'slow'}\n# print(alien_0['points']) - Raises a KeyError because 'points' does not exist yet\npoint_value = alien_0.get('points', 'No point value assigned.')\nprint(point_value)\n\n# Looping through all key value pairs\nuser_0 = {\n    'username':'efermi',\n    'first':'enrico',\n    'last':'fermi',\n    }\n\nfor key, value in user_0.items():\n    print(f\"\\nKey: {key}\")\n    print(f\"\\nValue: {value}\")\n\n# Looping through the keys only\nfavorite_languages = {'jen':'python',\n                      'sarah':'c',\n                      'edward':'ruby',\n                      'phil':'python'}\n\nfriends 
= ['phil', 'sarah']\nfor name in favorite_languages.keys():\n    print(name.title())\n\n    if name in friends:\n        language = favorite_languages[name].title()\n        print(f\"\\t{name.title()}, I see you love {language}!\")\n\n# Check to see if a user was polled\nif 'erin' not in favorite_languages.keys():\n    print(\"Erin, please take our poll!\")\n\n# Looping through keys in a particular order - Alphabetically\nfor name in sorted(favorite_languages.keys()):\n    print(f\"{name.title()}, thank you for taking the poll.\")\n\n# Looping through all values in a Dictionary\nprint(\"The following languages have been mentioned:\")\nfor language in favorite_languages.values():\n    print(language.title())\n\n# Use set() to list the unique entries - omits duplicates such as 'Python'\nprint(\"The following languages have been mentioned:\")\nfor language in set(favorite_languages.values()):\n    print(language.title())\n\n# The three largest rivers in the World example\nrivers = {'nile':'egypt', 'amazon':'brazil', 'yangtze':'china'}\n\nfor river in rivers.keys():\n    print(f\"The {river.title()} river runs through {rivers[river].title()}\")\n#OR\nfor key, value in rivers.items():\n    print(f\"The {key.title()} river runs through {value.title()}\")\n\n# People polled example\npeople_polled = ['jen', 'sarah', 'edward', 'phil', 'bob']\nfor name in people_polled:\n    if name in favorite_languages.keys():\n        print(f\"{name}, thank you for taking our poll!\")\n    if name not in favorite_languages.keys():\n        print(f\"{name}, please take our poll!\")\n\n# Nested Dictionaries in a list\nalien_0 = {'color':'green', 'points':5}\nalien_1 = {'color':'yellow', 'points':10}\nalien_2 = {'color':'red', 'points':15}\n\naliens = [alien_0, alien_1, alien_2]\n\nfor alien in aliens:\n    print(alien)\n\n# Use range() to create a fleet of aliens\naliens = []\nfor alien_number in range(30):\n    new_alien = {'color':'green', 'points':5, 'speed':'slow'}\n    aliens.append(new_alien)\n# Show the first 5 aliens.\nfor alien in aliens[:5]:\n    print(alien)\nprint('...')\n# Show how many aliens have been created\nprint(f\"Total number of aliens = {len(aliens)}\")\n\n#Change the first three aliens to faster aliens\nfor alien in aliens[:3]:\n    if alien['color'] == 'green':\n        alien['color'] = 'yellow'\n        alien['speed'] = 'medium'\n        alien['points'] = 10\n    elif alien['color'] == 'yellow':\n        alien['color'] = 'red'\n        alien['speed'] = 'fast'\n        alien['points'] = 15\n\n# Show the first 5 aliens.\nfor alien in aliens[:5]:\n    print(alien)\nprint(\"...\")\n\n# Storing a list in a Dictionary - Example 1\npizza = { 'crust':'thick',\n          'toppings':['mushrooms', 'extra cheese'],\n        }\nprint(f\"You ordered a {pizza['crust']}-crust pizza\"\n      \" with the following toppings:\")\nfor topping in pizza['toppings']:\n    print(\"\\t\" + topping)\n\n# Storing a list in a Dictionary - Example 2\nfavorite_languages = {\n    'jen': ['python', 'ruby'],\n    'sarah':['c'],\n    'edward':['ruby','go'],\n    'phil':['python','haskell'],\n}\n\nfor name, languages in favorite_languages.items():\n    print(f\"\\n{name.title()}'s favorite languages are:\")\n    for language in languages:\n        print(f\"\\t{language.title()}\")\n\n# A Dictionary in a Dictionary\nusers = {\n    'aeinstein': {\n        'first':'albert',\n        'last':'einstein',\n        'location':'princeton',\n        },\n    'mcurie': {\n        'first':'marie',\n        'last':'curie',\n        'location':'paris',\n        },\n}\n\nfor username, user_info in users.items():\n    print(f\"\\nUsername: {username}\")\n    full_name = f\"{user_info['first']} {user_info['last']}\"\n    location = user_info['location']\n\n    print(f\"\\tFull 
name: {full_name.title()}\")\n print(f\"\\tLocation: {location.title()}\")\n\n# Example 6-8 Pets:\npet0 = {'type':'cat','owner':'Bob'}\npet1 = {'type':'dog', 'owner':'Tom'}\npet2 = {'type':'dragon', 'owner':'khaleesi'}\n\npets = [pet0, pet1, pet2]\n\nfor pet in pets:\n print(f\"{pet['owner'].title()} has a {pet['type'].title()}\")\n\n\n","sub_path":"ch1_ch9/ch6_dictionaries.py","file_name":"ch6_dictionaries.py","file_ext":"py","file_size_in_byte":6059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"508645835","text":"import os\nimport logging\nimport numpy as np\nfrom PIL import Image\n\nimport config as c\n\n\ndef config_log():\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(levelname)s \\n %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=os.path.join(c.SAVE_PATH, \"train.log\"),\n filemode='w')\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n\ndef normalize_frames(frames):\n \"\"\"\n Convert frames from int8 [0, 255] to float32 [-1, 1].\n\n @param frames: A numpy array. The frames to be converted.\n\n @return: The normalized frames.\n \"\"\"\n new_frames = frames.astype(np.float32)\n new_frames = new_frames * 3 / 255\n # new_frames /= (255 / 2)\n # new_frames -= 1\n # if frames.min() == np.inf or frames.max() == np.inf or new_frames.min() == np.inf or new_frames.max() == np.inf:\n # print(frames.min(), frames.max(), new_frames.min(), new_frames.max())\n return new_frames\n\n\ndef denormalize_frames(frames):\n \"\"\"\n Performs the inverse operation of normalize_frames.\n\n @param frames: A numpy array. 
The frames to be converted.\n\n    @return: The denormalized frames.\n    \"\"\"\n    new_frames = frames / 3 * 255\n    # new_frames = frames + 1\n    #new_frames *= (255 / 2)\n    # noinspection PyUnresolvedReferences\n    new_frames = new_frames.astype(np.uint8)\n\n    return new_frames\n\n\n\ndef save_png(data, path):\n    data[data < 0] = 0\n    if c.NORMALIZE:\n        data = denormalize_frames(data)\n    else:\n        data = data.astype(np.uint8)\n\n    if not os.path.exists(path):\n        os.makedirs(path)\n    shape = data.shape\n    data = data.reshape(shape[0], shape[-3], shape[-2])\n    i = 1\n    for img in data[:]:\n        img = Image.fromarray(img)\n        img.save(os.path.join(path, str(i) + \".png\"))\n        i += 1\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"599756931","text":"#!/usr/bin/env python3\n\n#python3.6\n\n#2018-11-19\n\n#sarahfong\n\n################################\n# What this script does\n################################\n\n# find out what splits of enhancers\n# have not completed running on ACCRE\n# because the script timed out after 24h\n# and re-run those for 48h\n\n################################\n# modules\n################################\n\nfrom itertools import groupby\nimport glob\nimport pandas\nimport os, sys\nimport datetime\nprint(\"last run was\", datetime.datetime.now())\n################################\n# data path & files\n################################\n\npath = \"/dors/capra_lab/data_clean/mpra/wang17/age/\"\noutpath = path\n#data = sys.argv[1]\n\n\n# get the *no_syn_alignmennt_tiles.txt files that have collected records that do not match syntenic blocks\n# compare with the completed files in finished_list\n# launch the rest of the jobs that need longer to run\n\nalignmennt_list = glob.glob(\"%s*no_syn_alignmennt_tiles.txt\"% path)\nidx_dict = {}\nalignmennt_list = sorted(alignmennt_list)\nfor data in alignmennt_list:\n    idx = ((((data.split(\"/\")[-1]).split(\"_\")[2]).split(\"-\")[1])).split(\"no\")[0]\n    chr_number = ((data.split(\"/\")[-1]).split(\"_\")[1])\n\n    if chr_number not in idx_dict.keys():\n        idx_dict[chr_number] = []\n        idx_dict[chr_number].append(idx)\n    \n    else:\n        idx_dict[chr_number].append(idx)\n\n# compare with the completed files in finished_list\nfinished_list = glob.glob(\"%sGM12878Lib_chr*_ages-*_core_breaks.bed\"%path)\n\nfinished_dict={}\nfinished_list = sorted(finished_list)\nfor data in finished_list:\n    idx = (((data.split(\"/\")[-1]).split(\"_\")[2]).split(\"-\")[1])\n    chr_number = ((data.split(\"/\")[-1]).split(\"_\")[1])\n    if chr_number not in finished_dict.keys():\n        finished_dict[chr_number] = []\n        finished_dict[chr_number].append(idx)\n    \n    else:\n        finished_dict[chr_number].append(idx)\n\n# launch the rest of the jobs that need longer to run\n\nlaunch_list = glob.glob(\"%slaunch*.bed\"%path)\n\nfor i in sorted(launch_list):\n    print(i)\n\nlaunch_dict={}\nlaunch_list = sorted(launch_list)\nfor data in launch_list:\n    idx = (((data.split(\"/\")[-1]).split(\"-\")[1]).split(\".\")[0])\n    chr_number = ((data.split(\"/\")[-1]).split(\"_\")[3])\n    print(chr_number, idx)\n    if chr_number not in launch_dict.keys():\n        launch_dict[chr_number] = []\n        launch_dict[chr_number].append(idx)\n    \n    \n    else:\n        launch_dict[chr_number].append(idx)\n    \nfor key, value in finished_dict.items():\n    if key in idx_dict.keys():\n        for i in idx_dict[key]:\n            if i not in finished_dict[key]:\n            \n                to_launch =\"/dors/capra_lab/data_clean/mpra/wang17/age/launch_split_GM12878Lib_%s_ages-%s.bed\"%(key, i)\n                \n                # launch the breaks analysis.\n                
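# each unfinished split is resubmitted as its own SLURM job (per the header comment, with the longer 48h limit)\n                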
launch_cmd = \"sbatch /dors/capra_lab/users/fongsl/enh_age/scripts/mpra_x_enh_age/wang2017/mpra_breaks_wang.sbatch %s\" % to_launch\n print(launch_cmd) \n os.system(launch_cmd)","sub_path":"enhancer_age_complexity/mpra/wang2017/launch2_mpra_breaks_wang.py","file_name":"launch2_mpra_breaks_wang.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"643963084","text":"# Soren Sabet Sarvestany \n# April 7th 2019\n\n# Implement an algorithm to determine if a string has all unique characters. What if you cannot use additional data structures? \n\n\n####### My Solution 1 #########\n# Assume all characters in string can be represented by ASCII Character set\ndef tester(s):\n print(s + ': ' + str(isunique(s)))\n\ndef isunique(s):\n \n if (len(s) > 256): # I didn't get this \n return False\n \n l = [None]*256 # O(1)\n for c in s: # O(n) where n is the length of s\n if (l[ord(c)] != None): # O(1) \n return False # O(1)\n else: \n l[ord(c)] = 1 # O(1)\n return True\n# Therefore the function is Theta(1) and O(n) (time complexity)\n# Space complexity is O(1)\n# This is the same solution presented in the text. \n\n\n# Testing different cases \ntester('This is a test string')\ntester('')\ntester('ab')\ntester('Bab')\ntester('1')\ntester('abc')\ntester('aba')\n\n","sub_path":"Chapter1/isUnique.py","file_name":"isUnique.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"283627321","text":"from typing import *\n\ndef get_max_profit_and_range(a: List[int]):\n if not a:\n return 0\n\n global_max_profit = 0\n global_max_profit_begin_idx = 0\n global_max_profit_end_idx = 0\n\n current_profit = 0\n\n min_idx = 0\n min_val = a[0]\n\n for i, num in enumerate(a):\n if num - min_val <= 0:\n # Reset\n min_idx = i\n min_val = num\n\n current_profit = num - min_val\n\n if current_profit > global_max_profit:\n # Establish new or expand existing global max\n global_max_profit = current_profit\n global_max_profit_begin_idx = min_idx\n global_max_profit_end_idx = i\n\n\n return global_max_profit, (global_max_profit_begin_idx, global_max_profit_end_idx)\n\n \ndef max_profit_two_times(a: List[int]):\n # Get max profit and its segments. Get max profit again using the remaining segments of original array\n # One small nuance / shortcut is that we need max from *at most* two segments, but it's possible that doesn't matter\n # E.g. [0,1,2,3,4] will be covered in the first pass and answer will be the same regardless\n\n profit1, profit_range = get_max_profit_and_range(a)\n print(profit1, profit_range)\n\n left_remaining_range = a[:profit_range[0]]\n right_remaining_range = a[profit_range[1] + 1 :]\n\n # If anything remains on the left, get profit there\n profit2 = 0\n profit_range2 = None\n if left_remaining_range:\n profit2, profit_range2 = get_max_profit_and_range(left_remaining_range)\n\n # If anything remains on right, get profit there\n profit3 = 0\n profit_range3 = None\n if right_remaining_range:\n profit3, profit_range3 = get_max_profit_and_range(right_remaining_range)\n\n print(profit2, profit_range2)\n print(profit3, profit_range3)\n\n return profit1 + max(profit2, profit3)\n\n\ndef max_profit_k_times(a: List[int]):\n \"\"\"\n How to do this? 
Use a queue for the remaining stacks???\n    \"\"\"\n    pass\n\n\ndef max_profit_unlimited(a: List[int]):\n    # this is totally different, literally just add up all increasing values\n    total_profit = 0\n    prev_value = a[0]\n\n    for num in a:\n        if num > prev_value:\n            total_profit += num - prev_value\n        prev_value = num\n\n    return total_profit\n\na = [0,1,-1,0,-5,-4]\n#print(get_max_profit_and_range(a[:1]))\n#print(max_profit_two_times(a))\nprint(max_profit_unlimited(a))\n","sub_path":"eopi/ch06/6.09/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"207900960","text":"import tensorflow as tf\nimport config as cfg\nfrom tensorflow.python.ops import control_flow_ops \nfrom tensorflow.python.training import moving_averages \n\nclass Model_net(object):\n    def __init__(self):\n        self.BN_DECAY = 0.9997\n        self.UPDATE_OPS_COLLECTION = 'resnet_update_ops'\n        self.BN_EPSILON = 0.001\n        self.RESNET_VARIABLES = 'resnet_variables'\n        self.image_size_x = cfg.image_size_x\n        self.image_size_y = cfg.image_size_y\n        self.ground_size_x = cfg.ground_size_x\n        self.ground_size_y = cfg.ground_size_y\n        self.xs = tf.placeholder(tf.float32,shape=[None,self.image_size_x,self.image_size_y,3])\n        self.x_image = tf.reshape(self.xs,[-1,self.image_size_x,self.image_size_y,3])\n        self.training_flag = tf.placeholder(tf.bool)\n        self.ys = tf.placeholder(tf.float32,shape=[None,self.ground_size_x,self.ground_size_y])\n        self.y_image = tf.reshape(self.ys,[-1,self.ground_size_x,self.ground_size_y,1])\n        \n        self.prediction = self.model(self.x_image)\n        self.loss_sum = self.loss_layer(self.prediction,self.y_image)\n        \n    #################################\n    def weight_variable(self,shape):\n        initial = tf.truncated_normal(shape,stddev=0.1)\n        return tf.Variable(initial)\n\n    ###################################\n    def biases_variable(self,shape):\n        initial = tf.constant(0.1,shape=shape)\n        return tf.Variable(initial)\n    \n    ###################################\n    def _get_variable(self,name,\n                      shape,\n                      initializer,\n                      weight_decay=0.0,\n                      dtype='float',\n                      trainable=True):\n        \"\"\"A little wrapper around tf.get_variable to do weight decay and add to\n        the resnet collection.\"\"\"\n        if weight_decay > 0:\n            regularizer = tf.contrib.layers.l2_regularizer(weight_decay)\n        else:\n            regularizer = None\n        collections = [tf.GraphKeys.GLOBAL_VARIABLES, self.RESNET_VARIABLES]\n        return tf.get_variable(name,\n                               shape=shape,\n                               initializer=initializer,\n                               dtype=dtype,\n                               regularizer=regularizer,\n                               collections=collections,\n                               trainable=trainable)\n\n    #####################\n    def bn(self, x, is_training):\n        x_shape = x.get_shape() \n        params_shape = x_shape[-1:] \n        axis = list(range(len(x_shape) - 1)) \n        beta = self._get_variable('beta', params_shape, initializer=tf.zeros_initializer()) \n        gamma = self._get_variable('gamma', params_shape, initializer=tf.ones_initializer()) \n        moving_mean = self._get_variable('moving_mean', params_shape, initializer=tf.zeros_initializer(), trainable=False) \n        moving_variance = self._get_variable('moving_variance', params_shape, initializer=tf.ones_initializer(), trainable=False) \n        # These ops will only be performed when training. 
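At inference the stored moving averages are used instead (via the cond below).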
\n mean, variance = tf.nn.moments(x, axis) \n update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, self.BN_DECAY) \n update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, self.BN_DECAY) \n tf.add_to_collection(self.UPDATE_OPS_COLLECTION, update_moving_mean) \n tf.add_to_collection(self.UPDATE_OPS_COLLECTION, update_moving_variance) \n mean, variance = control_flow_ops.cond( \n is_training, lambda: (mean, variance), \n lambda: (moving_mean, moving_variance)) \n return tf.nn.batch_normalization(x, mean, variance, beta, gamma, self.BN_EPSILON) \n #################################\n def model(self, x_image):\n with tf.variable_scope('model1'):\n #####8-9-26-27-28\n #model 1\n W_conv1 = self.weight_variable([5,5,3,36])\n b_conv1 = self.biases_variable([36])\n h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,W_conv1,strides=[1,1,1,1],padding='SAME')+b_conv1)\n h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W_conv2 = self.weight_variable([7,7,36,72])\n b_conv2 = self.biases_variable([72])\n h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1,W_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)\n #h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W_conv3 = self.weight_variable([13,13,72,36])\n b_conv3 = self.biases_variable([36])\n h_conv3 = tf.nn.relu(tf.nn.conv2d(h_conv2,W_conv3,strides=[1,1,1,1],padding='SAME')+b_conv3)\n #h_pool3 = tf.nn.max_pool(h_conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W_conv4 = self.weight_variable([11,11,36,1])\n b_conv4 = self.biases_variable([1])\n h_conv4 = tf.nn.relu(tf.nn.conv2d(h_conv3,W_conv4,strides=[1,1,1,1],padding='SAME')+b_conv4)\n\n #h_conv4 = bn(h_conv4,training_flag)\n\n with tf.variable_scope('model2'):\n #####6-7-21-22-23\n #model2\n W2_conv1 = self.weight_variable([3,3,3,24])\n b2_conv1 = self.biases_variable([24])\n h2_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,W2_conv1,strides=[1,1,1,1],padding='SAME')+b2_conv1)\n h2_pool1 = tf.nn.max_pool(h2_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W2_conv2 = self.weight_variable([5,5,24,48])\n b2_conv2 = self.biases_variable([48])\n h2_conv2 = tf.nn.relu(tf.nn.conv2d(h2_pool1,W2_conv2,strides=[1,1,1,1],padding='SAME')+b2_conv2)\n #h2_pool2 = tf.nn.max_pool(h2_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W2_conv3 = self.weight_variable([15,15,48,24])\n b2_conv3 = self.biases_variable([24])\n h2_conv3 = tf.nn.relu(tf.nn.conv2d(h2_conv2,W2_conv3,strides=[1,1,1,1],padding='SAME')+b2_conv3)\n #h_pool3 = tf.nn.max_pool(h_conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W2_conv4 = self.weight_variable([11,11,24,1])\n b2_conv4 = self.biases_variable([1])\n h2_conv4 = tf.nn.relu(tf.nn.conv2d(h2_conv3,W2_conv4,strides=[1,1,1,1],padding='SAME')+b2_conv4)\n\n #h2_conv4 = bn(h2_conv4,training_flag)\n\n with tf.variable_scope('model3'):\n #####4-5-16-17-18\n #model3\n W3_conv1 = self.weight_variable([3,3,3,24])\n b3_conv1 = self.biases_variable([24])\n h3_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,W3_conv1,strides=[1,1,1,1],padding='SAME')+b3_conv1)\n h3_pool1 = tf.nn.max_pool(h3_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W3_conv2 = self.weight_variable([5,5,24,48])\n b3_conv2 = self.biases_variable([48])\n h3_conv2 = tf.nn.relu(tf.nn.conv2d(h3_pool1,W3_conv2,strides=[1,1,1,1],padding='SAME')+b3_conv2)\n #h3_pool2 = tf.nn.max_pool(h3_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W3_conv3 = self.weight_variable([11,11,48,24])\n 
b3_conv3 = self.biases_variable([24])\n h3_conv3 = tf.nn.relu(tf.nn.conv2d(h3_conv2,W3_conv3,strides=[1,1,1,1],padding='SAME')+b3_conv3)\n #h_pool3 = tf.nn.max_pool(h_conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W3_conv4 = self.weight_variable([9,9,24,1])\n b3_conv4 = self.biases_variable([1])\n h3_conv4 = tf.nn.relu(tf.nn.conv2d(h3_conv3,W3_conv4,strides=[1,1,1,1],padding='SAME')+b3_conv4)\n\n #h3_conv4 = bn(h3_conv4,training_flag)\n\n with tf.variable_scope('model4'):\n #####2-3-13-14\n #model4\n W4_conv1 = self.weight_variable([7,7,3,18])\n b4_conv1 = self.biases_variable([18])\n #h4_conv1 = tf.nn.relu(tf.nn.atrous_conv2d(x_image,W4_conv1,rate=2,padding='SAME')+b4_conv1)\n h4_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,W4_conv1,strides=[1,1,1,1],padding='SAME')+b4_conv1) \n h4_pool1 = tf.nn.max_pool(h4_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W4_conv2 = self.weight_variable([9,9,18,36])\n b4_conv2 = self.biases_variable([36])\n h4_conv2 = tf.nn.relu(tf.nn.atrous_conv2d(h4_pool1,W4_conv2,rate=2,padding='SAME')+b4_conv2)\n #h4_conv2 = tf.nn.relu(tf.nn.conv2d(h4_pool1,W4_conv2,strides=[1,1,1,1],padding='SAME')+b4_conv2) \n #h4_pool2 = tf.nn.max_pool(h4_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W4_conv3 = self.weight_variable([9,9,36,6])\n b4_conv3 = self.biases_variable([6])\n h4_conv3 = tf.nn.relu(tf.nn.atrous_conv2d(h4_conv2,W4_conv3,rate=2,padding='SAME')+b4_conv3)\n #h4_conv3 = tf.nn.relu(tf.nn.conv2d(h4_conv2,W4_conv3,strides=[1,1,1,1],padding='SAME')+b4_conv3) \n #h_pool3 = tf.nn.max_pool(h_conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W4_conv4 = self.weight_variable([7,7,6,1])\n b4_conv4 = self.biases_variable([1])\n #h4_conv4 = tf.nn.relu(tf.nn.atrous_conv2d(h4_conv3,W4_conv4,rate=2,padding='SAME')+b4_conv4)\n h4_conv4 = tf.nn.relu(tf.nn.conv2d(h4_conv3,W4_conv4,strides=[1,1,1,1],padding='SAME')+b4_conv4) \n\n #h4_conv4 = bn(h4_conv4,training_flag)\n '''\n with tf.variable_scope('model5'):\n #####1-2\n #model5\n W5_conv1 = self.weight_variable([3,3,3,21])\n b5_conv1 = self.biases_variable([21])\n h5_conv1 = tf.nn.relu(tf.nn.atrous_conv2d(self.x_image,W5_conv1,rate=2,padding='SAME')+b5_conv1)\n h5_pool1 = tf.nn.max_pool(h5_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W5_conv2 = self.weight_variable([7,7,21,42])\n b5_conv2 = self.biases_variable([42])\n h5_conv2 = tf.nn.relu(tf.nn.atrous_conv2d(h5_pool1,W5_conv2,rate=2,padding='SAME')+b5_conv2)\n #h5_pool2 = tf.nn.max_pool(h5_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W5_conv3 = self.weight_variable([7,7,42,7])\n b5_conv3 = self.biases_variable([7])\n h5_conv3 = tf.nn.relu(tf.nn.atrous_conv2d(h5_conv2,W5_conv3,rate=2,padding='SAME')+b5_conv3)\n #h_pool3 = tf.nn.max_pool(h_conv3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\n W5_conv4 = self.weight_variable([5,5,7,1])\n b5_conv4 = self.biases_variable([1])\n h5_conv4 = tf.nn.relu(tf.nn.atrous_conv2d(h5_conv3,W5_conv4,rate=2,padding='SAME')+b5_conv4)\n\n #h5_conv4 = bn(h5_conv4,training_flag)\n '''\n with tf.variable_scope('merge_model'):\n #merged_layer = tf.nn.sigmoid(tf.add(tf.add(tf.add(tf.add(h_conv4,h2_conv4),h3_conv4),h4_conv4),h5_conv4))\n node = []\n node.append(h_conv4)\n node.append(h2_conv4)\n node.append(h3_conv4)\n node.append(h4_conv4)\n #node.append(h5_conv4)\n merged_layer = tf.concat(node,3)\n\n merge_weight = self.weight_variable([1,1,4,1])\n merge_bias = self.biases_variable([1])\n merge_conv = 
tf.nn.conv2d(merged_layer,merge_weight,strides=[1,1,1,1],padding='SAME')+merge_bias\n merge_conv = self.bn(merge_conv,self.training_flag)\n\n #Wm_conv1 = self.weight_variable([])\n return merge_conv\n \n \n \n def loss_layer(self,net,ground_truth):\n prediction = tf.reshape(tensor=net,shape=(-1,2))\n y_image_ = tf.nn.max_pool(ground_truth,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n y_image_ = tf.reshape(tensor=y_image_,shape=(-1,2))\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=y_image_)\n loss_sum = tf.reduce_sum(loss)\n return loss_sum\n \n\n\n","sub_path":"counting method/varia/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"652319167","text":"#!/usr/bin/python3\nfrom eulertools import date\n\nDate = date.Date\n\nd = Date()\n\nd += 365\n\nfirstSundays = 0\n\nwhile d.year < 2001:\n if d.dayOfWeek == 7 and d.day == 1:\n firstSundays += 1\n\n d += 1\n\nprint('Sundays on the first in the 21st century: {0}'.format(firstSundays))\n","sub_path":"python/problem19.py","file_name":"problem19.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"389124205","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams[\"text.usetex\"] = True\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams[\"mathtext.fontset\"] = \"dejavuserif\"\nplt.rcParams['font.monospace'] = 'Ubuntu Mono'\nplt.rcParams['font.serif'] = 'Computer Modern Roman'\n\nplt.rcParams['lines.linewidth'] = 1.5\nplt.rcParams['xtick.labelsize'] = 16\nplt.rcParams['ytick.labelsize'] = 16\nplt.rcParams['axes.labelsize'] = 24\nplt.rcParams['axes.titlesize'] = 24\nplt.rcParams['legend.fancybox'] = True\nplt.rcParams['legend.fontsize'] = 16\n\nDs = np.arange(5, 100 + 1, step=5)\n\nanalytic_E0 = np.load(\"datas/E0_sum.npz\")\ngs = analytic_E0[\"gs\"]\nE0s = analytic_E0[\"E0s\"]\n\ngs, E0s = gs[3:6], E0s[3:6]\n\n\"\"\"\nvumps_data_dir = \"datas/E0s_general1/\"\nerrors = np.empty((gs.size, Ds.size))\nfor col in range(Ds.size):\n E0_D = np.load(vumps_data_dir + \"D_%d.npz\" % Ds[col])[\"E0s\"]\n #error_D = np.log10( np.abs((E0_D - E0s) / E0s) )\n error_D = np.abs((E0_D - E0s) / E0s)\n errors[:, col] = error_D\n\"\"\"\n\nvumps_data_dir = \"datas/E0s_general/\"\nerrors = np.empty((gs.size, Ds.size))\nfor row in range(gs.size):\n E0_g = np.load(vumps_data_dir + \"g_%.2f.npz\" % gs[row])[\"E0s\"]\n print(\"g:\", gs[row], E0_g)\n error_g = np.abs((E0_g - E0s[row]) / E0s[row])\n errors[row, :] = error_g\n\n\"\"\"\nfor i in range(1, 5):\n for row in [4 - i, 4, 4 + i]:\n plt.plot(Ds, errors[row, :], \"o-\", label=r\"$g = %.2f$\" % gs[row])\n plt.legend()\n plt.xlabel(r\"$D$\")\n plt.ylabel(r\"Energy relative error\")\n plt.yscale(\"log\")\n plt.subplots_adjust(bottom=0.15, left=0.15)\n plt.savefig(vumps_data_dir + \"error%d.pdf\" % i)\n plt.show()\n\"\"\"\n\nfor row in range(3):\n plt.plot(Ds, errors[row, :], \"o-\", label=r\"$g = %.2f$\" % gs[row])\nplt.legend()\nplt.xlabel(r\"$D$\")\nplt.ylabel(r\"Energy relative error\")\nplt.yscale(\"log\")\nplt.subplots_adjust(bottom=0.15, left=0.15)\nplt.savefig(vumps_data_dir + \"error.pdf\")\nplt.show()\n","sub_path":"examples/TFIM_vumps/ploterror.py","file_name":"ploterror.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"278470783","text":"import requests\nimport urllib.parse\nimport csv\nimport json\nimport pandas as pd\nimport re\nfrom dotenv import load_dotenv\nimport os\n\nfrom newsapi import NewsApiClient\n\nload_dotenv()\n\ndef sentiment_analysis(text):\n url = \"https://text-sentiment.p.rapidapi.com/analyze\"\n formatted = text.translate ({ord(c): \"\" for c in \"!@#$%^&*()[]{};:,./<>?\\|`~-=_+\"})\n formatted = urllib.parse.quote(formatted)\n payload = f\"text={formatted}\"\n print (payload)\n headers = {\n 'x-rapidapi-host': \"text-sentiment.p.rapidapi.com\",\n 'x-rapidapi-key': os.getenv(\"RAPID_API_KEY\"),\n 'content-type': \"application/x-www-form-urlencoded\"\n }\n\n response = requests.request(\"POST\", url, data=payload, headers=headers)\n print(response)\n score = 0\n score += response.json()['pos']\n score -= response.json()['neg']\n return score\n\n\n\n\nInit\nnewsapi = NewsApiClient(api_key=os.getenv('NEWS_API_KEY'))\n\nall_articles = newsapi.get_everything(q='tsla',\n # sources='bbc-news,the-verge',\n # domains='bbc.co.uk,techcrunch.com',\n from_param='2019-12-01',\n to='2019-12-12',\n language='en',\n sort_by='relevancy',\n page_size=100,\n page=1)\n\n\nnews_df = pd.DataFrame.from_dict(all_articles['articles'])\n\nnews_df = news_df[['source','author','title','publishedAt']]\nnews_df['source'] = news_df['source'].apply(lambda x: x['name'])\nnews_df['sentiment'] = news_df['title'].apply(lambda x: sentiment_analysis(x))\n\nnews_df.to_csv('news_data.csv')\n\n\n","sub_path":"news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"416471121","text":"from posixpath import basename\nfrom dbfread import DBF\nimport pandas as pd\nimport os\n\n\ndef readDBFFile(dbfFileLocation):\n\n dbfData = DBF(dbfFileLocation)\n\n return dbfData\n\ndef convertDBFPandas(dbfData):\n\n pdDataFrame = pd.DataFrame(iter(dbfData))\n\n return pdDataFrame\n\ndef subsetDBFDataFrame(dbfDataFrame, keepCols = ['ID','MEAN']):\n\n subsetData = dbfDataFrame[keepCols]\n\n return subsetData\n\ndef appendName(dbfDataFrame, appendName, originalValue = 'MEAN'):\n\n modifyDF = dbfDataFrame.rename({originalValue: appendName}, axis=1)\n \n return modifyDF\n\ndef combineDBFDF(dbfDF,dbfDFToAdd,colToMergeOn = 'ID'):\n\n if dbfDFToAdd.empty:\n return dbfDF\n\n mergedDF = pd.merge(dbfDF, dbfDFToAdd,\n how = 'inner', on = colToMergeOn,\n suffixes = ('','_delme'))\n\n columnToEdit = list(mergedDF.columns)[-1]\n toRemove = columnToEdit.removesuffix('_delme')\n changeDFColName = appendName(mergedDF, originalValue=columnToEdit,\n appendName = toRemove)\n\n return changeDFColName\n\ndef extractFileName(fileName):\n\n baseName = str(os.path.basename(os.path.normpath(fileName)))\n\n removeFileType = baseName.removesuffix('.dbf')\n\n return removeFileType\n\ndef iterateDBF(listOfDBF):\n\n toFillDF = pd.DataFrame()\n\n for oneFile in listOfDBF:\n\n fileName = extractFileName(oneFile)\n\n extractDBF = readDBFFile(oneFile)\n pdDF = convertDBFPandas(extractDBF)\n subsetDF = subsetDBFDataFrame(pdDF)\n modifiedDF = appendName(subsetDF,fileName)\n toFillDF = combineDBFDF(modifiedDF,toFillDF)\n\n return toFillDF\n\ndef readInCoeff(csvFile: str) ->pd.DataFrame:\n\n coeffData = pd.read_csv(csvFile)\n\n return coeffData\n\ndef mergeDataByID(coeffData, dbfData, colToMerge = 'ID'):\n\n mergedDF = pd.merge(coeffData, dbfData,\n how = 'inner', on = colToMerge)\n\n return 
mergedDF\n","sub_path":"exploratoryAnalysis/Python/DataAssimilation/readDBF.py","file_name":"readDBF.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14637872","text":"data_path = 'file:///home/shoe/ravens_volume/test_data/185_baseball/TRAIN/dataset_TRAIN/tables/learningData.csv'\n#\n# import h2o\n# from h2o.automl import H2OAutoML\n#\n# h2o.init()\n#\n# data = h2o.import_file(data_path)\n#\n# data['TWORAVENS_FOLD_COLUMN'] = data.kfold_column(n_folds=4)\n# automl = H2OAutoML(\n# max_runtime_secs=30,\n# keep_cross_validation_predictions=True,\n# keep_cross_validation_fold_assignment=True)\n#\n# automl.train(y=\"Hall_of_Fame\", x=['At_bats', 'Runs'], training_frame=data, fold_column='TWORAVENS_FOLD_COLUMN')\n#\n# best_model = h2o.get_model(automl.leaderboard.as_data_frame()['model_id'][0])\n#\n# print(best_model.cross_validation_fold_assignment())\n#\n\nfrom supervised.automl import AutoML\nimport pandas as pd\n\ndataframe = pd.read_csv(data_path)\ndataframe = dataframe[dataframe['Hall_of_Fame'] != 2]\n\nautoml_mljar = AutoML(total_time_limit=30)\nautoml_mljar.fit(dataframe[['Runs', 'At_bats']], dataframe['Hall_of_Fame'])\n\nmljar_model = automl_mljar._models[0]\n\nmljar_model.train({\"train\": {\"X\": dataframe[['Runs', 'At_bats']], \"y\": dataframe['Hall_of_Fame']}})\nmljar_model.predict(dataframe[['Runs', 'At_bats']])\n\n\n# import mlbox.model.classification\n# import mlbox.model.regression\n#\n#\n# automl = mlbox.model.classification.Classifier()\n#\n# automl.fit(dataframe[['Runs', 'At_bats']], dataframe['Hall_of_Fame'])\n","sub_path":"dev_scripts/automl_testing.py","file_name":"automl_testing.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"410651095","text":"import sys,time\nimport numpy as np\nimport torch\nfrom torch import nn\nimport utils\n\n########################################################################################################################\n\nclass Appr(object):\n\n def __init__(self,model,nepochs=100,sbatch=32,lr=0.1,lr_min=1e-7,lr_factor=3,lr_patience=13,clipgrad=10000,lamb=0.75,smax=400,args=None):\n self.arch ='initmodel'\n if args.resume_path:#load pretrained model if necessary\n print('loading checkpoint {}'.format(args.resume_path))\n checkpoint = torch.load(args.resume_path)\n self.arch = checkpoint['arch']\n\n self.begin_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'],strict=False)\n print('warning:last layer is 2 classes')\n model.last = nn.Linear(model.last.in_features,10)\n model.last = model.last.cuda()\n\n # forgetting previous task\n for n,p in model.named_parameters():\n if n.startswith('e'):\n zero = torch.zeros_like(p.data)\n p.data = torch.where(p.data != 0, zero, p.data)\n\n self.model=model\n\n self.nepochs=nepochs\n self.sbatch=sbatch\n self.lr=lr\n self.lr_min=lr_min\n self.lr_factor=lr_factor\n self.lr_patience=lr_patience\n self.clipgrad=clipgrad\n\n self.ce=torch.nn.CrossEntropyLoss()\n self.optimizer=self._get_optimizer()\n\n self.lamb=lamb # Grid search = [0.1, 0.25, 0.5, 0.75, 1, 1.5, 2.5, 4]; chosen was 0.75\n self.smax=smax # Grid search = [25, 50, 100, 200, 400, 800]; chosen was 400\n if len(args.parameter)>=1:\n params=args.parameter.split(',')\n print('Setting parameters to',params)\n self.lamb=float(params[0])\n self.smax=float(params[1])\n\n self.mask_pre=None\n self.mask_back=None\n 
self.args=args\n\n return\n\n def _get_optimizer(self,lr=None):\n if lr is None: lr=self.lr\n return torch.optim.SGD(self.model.parameters(),lr=lr)\n\n def train(self,t,xtrain,ytrain,xvalid,yvalid):\n best_loss=np.inf\n best_model=utils.get_model(self.model)\n lr=self.lr\n #if int(t)<=0:lr=self.lr\n #else:lr=self.lr*0.1\n patience=self.lr_patience\n self.optimizer=self._get_optimizer(lr)\n\n # Loop epochs\n try:\n for e in range(self.nepochs):\n # Train\n clock0=time.time()\n self.train_epoch(t,xtrain,ytrain)\n clock1=time.time()\n train_loss,train_acc=self.eval(t,xtrain,ytrain)\n clock2=time.time()\n print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,\n 1000*self.sbatch*(clock1-clock0)/xtrain.size(0),1000*self.sbatch*(clock2-clock1)/xtrain.size(0),train_loss,100*train_acc),end='')\n # Valid\n valid_loss,valid_acc=self.eval(t,xvalid,yvalid)\n\n if (valid_loss0:\n classname = self.model.__class__.__name__\n\n for module in self.model.modules():\n if isinstance(module, nn.BatchNorm2d):\n module.eval()\n #if isinstance(module, nn.Linear):\n # module.eval()\n #print('fcmodule',module)\n #for param in self.model.parameters(): \n # param.requires_grad_(False)\n # print('param#~~',n,param)\n \"\"\"\n for n,p in self.model.named_parameters():\n if 'last' in n: p.requires_grad_(False)\n \"\"\"\n\n print_model=utils.get_model(self.model)\n \"\"\"print weighting for check\n for parameters in print_model:#.parameters():\n if 'last.weight' in parameters:#'bn2_1_d.running' in parameters:\n # parameters.requires_grad_(False)\n print('parameters_last',parameters,print_model[parameters])\n if 'bn2_1_d.running' in parameters:\n print('parameters_bn',parameters,print_model[parameters])\n \"\"\"\n r=np.arange(x.size(0))\n np.random.shuffle(r)\n r=torch.LongTensor(r).cuda()\n\n # Loop batches\n for i in range(0,len(r),self.sbatch):\n if i+self.sbatch<=len(r): b=r[i:i+self.sbatch]\n else: b=r[i:]\n images=torch.autograd.Variable(x[b],volatile=False)\n targets=torch.autograd.Variable(y[b],volatile=False)\n task=torch.autograd.Variable(torch.LongTensor([t]).cuda(),volatile=False)\n #s=(self.smax-1/self.smax)*i/len(r)+1/self.smax##\n s=1#20201127 delete anneal\n\n # Forward\n outputs,masks=self.model.forward(task,images,s=s)\n output=outputs[t]#single mask #[t]\n loss,_=self.criterion(output,targets,masks)\n\n # Backward\n self.optimizer.zero_grad()\n loss.backward()\n\n # Restrict layer gradients in backprop\n if t>0:\n for n,p in self.model.named_parameters():\n if n in self.mask_back:\n p.grad.data*=self.mask_back[n]\n\n # Compensate embedding gradients\n for n,p in self.model.named_parameters():\n if n.startswith('e'):\n num=torch.cosh(torch.clamp(s*p.data,-thres_cosh,thres_cosh))+1\n den=torch.cosh(p.data)+1\n p.grad.data*=self.smax/s*num/den##\n\n # Apply step\n torch.nn.utils.clip_grad_norm(self.model.parameters(),self.clipgrad)\n self.optimizer.step()\n\n # Constrain embeddings\n for n,p in self.model.named_parameters():\n if n.startswith('e'):\n p.data=torch.clamp(p.data,-thres_emb,thres_emb)\n\n #print(masks[-1].data.view(1,-1))\n #if i>=5*self.sbatch: sys.exit()\n #if i==0: print(masks[-2].data.view(1,-1),masks[-2].data.max(),masks[-2].data.min())\n #print(masks[-2].data.view(1,-1))\n\n return\n\n def eval(self,t,x,y):\n total_loss=0\n total_acc=0\n total_num=0\n self.model.eval()\n\n total_reg=0\n\n r=np.arange(x.size(0))\n r=torch.LongTensor(r).cuda()\n\n # Loop batches\n for i in range(0,len(r),self.sbatch):\n if i+self.sbatch<=len(r): 
b=r[i:i+self.sbatch]\n else: b=r[i:]\n images=torch.autograd.Variable(x[b],volatile=True)\n targets=torch.autograd.Variable(y[b],volatile=True)\n task=torch.autograd.Variable(torch.LongTensor([t]).cuda(),volatile=True)\n\n # Forward\n outputs,masks=self.model.forward(task,images,s=self.smax)\n output=outputs[t]#single mask #[t]\n loss,reg=self.criterion(output,targets,masks)\n #print(type(outputs))\n #print(type(output))\n _,pred=output.max(1)\n #print(_)\n #print(pred)\n hits=(pred==targets).float()\n\n # Log\n total_loss+=loss.data.cpu().numpy().item()*len(b)\n total_acc+=hits.sum().data.cpu().numpy().item()\n total_num+=len(b)\n total_reg+=reg.data.cpu().numpy().item()*len(b)\n\n print(' {:.3f} '.format(total_reg/total_num),end='')\n\n return total_loss/total_num,total_acc/total_num\n\n def criterion(self,outputs,targets,masks):\n reg=0\n count=0\n if self.mask_pre is not None:\n for m,mp in zip(masks,self.mask_pre):\n aux=1-mp\n reg+=(m*aux).sum()\n count+=aux.sum()\n else:\n for m in masks:\n reg+=m.sum()\n count+=np.prod(m.size()).item()\n reg/=count\n return self.ce(outputs,targets)+self.lamb*reg,reg\n\n def erase(self):\n # forgetting\n for n,p in self.model.named_parameters():\n if n.startswith('e'):\n zero = torch.zeros_like(p.data)\n p.data = torch.where(p.data < 0, zero, p.data)\n def reset_bn(self):\n #self.model.last = nn.Linear(self.model.last.in_features,10)\n #self.model.last = self.model.last.cuda()\n #bn 1-1\n #self.bn=torch.nn.BatchNorm2d(64,affine=False)\n self.model.bn1=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n #2-1\n self.model.bn2_1_1=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n self.model.bn2_1_2=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n self.model.bn2_1_3=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn2_1_d=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n #self.bnt2_1=torch.nn.Embedding(len(self.taskcla),256)\n #2-2\n self.model.bn2_2_1=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n self.model.bn2_2_2=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n self.model.bn2_2_3=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n #2-3\n self.model.bn2_3_1=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n self.model.bn2_3_2=torch.nn.BatchNorm2d(64, affine=False, track_running_stats=False)\n self.model.bn2_3_3=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n #3-1\n self.model.bn3_1_1=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_1_2=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_1_3=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn3_1_d=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n #self.ect3_1=torch.nn.Embedding(len(self.taskcla),512)\n #3-2\n self.model.bn3_2_1=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_2_2=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_2_3=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n #3-3\n self.model.bn3_3_1=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_3_2=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_3_3=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n #3-4\n self.model.bn3_4_1=torch.nn.BatchNorm2d(128, 
affine=False, track_running_stats=False)\n self.model.bn3_4_2=torch.nn.BatchNorm2d(128, affine=False, track_running_stats=False)\n self.model.bn3_4_3=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n #4-1\n self.model.bn4_1_1=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_1_2=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_1_3=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n self.model.bn4_1_d=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n #self.ect3_1=torch.nn.Embedding(len(self.taskcla),512)\n #4-2\n self.model.bn4_2_1=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_2_2=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_2_3=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n #4-3\n self.model.bn4_3_1=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_3_2=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_3_3=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n #4-4\n self.model.bn4_4_1=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_4_2=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_4_3=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n #4-5\n self.model.bn4_5_1=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_5_2=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_5_3=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n #4-6\n self.model.bn4_6_1=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_6_2=torch.nn.BatchNorm2d(256, affine=False, track_running_stats=False)\n self.model.bn4_6_3=torch.nn.BatchNorm2d(1024, affine=False, track_running_stats=False)\n #5-1\n self.model.bn5_1_1=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn5_1_2=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn5_1_3=torch.nn.BatchNorm2d(2048, affine=False, track_running_stats=False)\n self.model.bn5_1_d=torch.nn.BatchNorm2d(2048, affine=False, track_running_stats=False)\n #self.ect3_1=torch.nn.Embedding(len(self.taskcla),512)\n #5-2\n self.model.bn5_2_1=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn5_2_2=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn5_2_3=torch.nn.BatchNorm2d(2048, affine=False, track_running_stats=False)\n #5-3\n self.model.bn5_3_1=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn5_3_2=torch.nn.BatchNorm2d(512, affine=False, track_running_stats=False)\n self.model.bn5_3_3=torch.nn.BatchNorm2d(2048, affine=False, track_running_stats=False)\n\n########################################################################################################################\n","sub_path":"test/src/approaches/hat.py","file_name":"hat.py","file_ext":"py","file_size_in_byte":16391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"44271635","text":"import os, json, logging, logging.handlers\n\n# logger\nlogger = logging.getLogger('logger')\n\n# Set current dir\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef getSecret(service, token='null'):\n \n with 
open(\"{0}/secrets.json\".format(dir_path)) as data:\n s = json.load(data)\n #print s\n #print s['{}'.format(service)]['{}'.format(token)]\n # If there is no token, return whole parent object\n if token == 'null':\n secret = s['{}'.format(service)]\n else:\n secret = s['{}'.format(service)]['{}'.format(token)]\n logger.debug(\"EXIT secrets: {}\".format(len(secret)))\n return secret","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"154992381","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/11/28 9:46\n# @Author : sch\n# @File : 2017_11_28.py\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nplt.style.use('ggplot')\nplt.rcParams['font.sans-serif'] = ['SimHei'] #用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False #用来正常显示负号\n\nclient_credit = pd.read_csv('data_output/data_for_model/client_credit.csv', encoding='gbk')\nclient_credit_select = client_credit[client_credit['year'] == 2016].append(client_credit[client_credit['year'] == 2017])\nclient_credit_selected = client_credit_select[['year', 'client_name', 'credit_ratio']]\nclient_credit_selected['year'] =client_credit_selected['year'] - 1\n\nCSmodule = pd.read_clipboard()\nHImodule = pd.read_csv('data_output/ZTmodule/HImudule_index20171124V0.2.csv')\nHImodule = HImodule.drop('HI3', 1)\nOSmodule = pd.read_csv('data_output/ZTmodule/OSmudule_index20171127V0.3.csv')\nOSmodule = OSmodule.drop('OS5', 1)\nnew_cs_data = pd.merge(CSmodule, client_credit_selected, on=['year', 'client_name'], how='inner')\nnew_hi_data = pd.merge(HImodule, client_credit_selected, on=['year', 'client_name'], how='inner')\nnew_os_data = pd.merge(OSmodule, client_credit_selected, on=['year', 'client_name'], how='inner')\nnew_cs_data_all = new_cs_data.dropna()\nnew_hi_data_all = new_hi_data.dropna()\nnew_os_data_all = new_os_data.dropna()\n\n# pairplot()\n# var主要适用于分类变量,hue为想进行分类的指标\nsns.pairplot(new_cs_data_all, vars=['CS1', 'CS2', 'CS3', 'CS4', 'credit_ratio'])\nplt.show()\nsns.pairplot(new_cs_data_all, vars=['CS5', 'CS6', 'CS7', 'CS8', 'CS9', 'credit_ratio'])\nplt.show()\nsns.pairplot(new_hi_data_all, vars=['HI1', 'HI2', 'HI4', 'HI5', 'HI6', 'credit_ratio'])\nplt.show()\nsns.pairplot(new_os_data_all, vars=['OS1', 'OS2', 'OS3', 'OS4', 'credit_ratio'])\nplt.show()\nsns.pairplot(new_os_data_all, vars=['OS6', 'OS7', 'OS8', 'credit_ratio'])\nplt.show()\nfor i in ['CS1', 'CS2', 'CS3', 'CS4', 'CS5', 'CS6', 'CS7', 'CS8', 'CS9']:\n sns.jointplot(i, 'credit_ratio', new_cs_data_all)\nfor i in ['HI1', 'HI2', 'HI4', 'HI5', 'HI6']:\n sns.jointplot(i, 'credit_ratio', new_hi_data_all)\nfor i in ['OS1', 'OS2', 'OS3', 'OS4', 'OS6', 'OS7', 'OS8']:\n sns.jointplot(i, 'credit_ratio', new_os_data_all)\nsns.jointplot('CS1', 'credit_ratio', new_cs_data_all)\nplt.show()\n\n\n#\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn import cross_validation, metrics\nX1 = new_cs_data_all[['CS1', 'CS2', 'CS3', 'CS4', 'CS5', 'CS6', 'CS7', 'CS8', 'CS9']]\ny1 = new_cs_data_all['credit_ratio']\ny1 = y1.reset_index().drop('index', 1)\ny1_train = y1.loc[:30]\ny1_test = y1.loc[31:]\nclf1 = RandomForestRegressor(n_estimators=10, random_state=10)\nclf1.fit(X1, y1)\nclf1.predict(X1)\nX1_train = X1.loc[:30]\nX1_test = X1.loc[30:]\nclf1.fit(X1_train, y1_train)\nclf1.predict(X1_train)\nclf1.predict(X1_test)\nmetrics.mean_squared_error(y1, clf1.predict(X1))\n\nX2 = 
new_hi_data_all[['HI1', 'HI2', 'HI4', 'HI5', 'HI6']]\ny2 = new_hi_data_all['credit_ratio']\nclf2 = RandomForestRegressor(n_estimators=10,random_state=10)\nclf2.fit(X2, y2)\nclf2.predict(X2)\nmetrics.mean_squared_error(y2, clf2.predict(X2))\n\nX3 = new_os_data_all[['OS1', 'OS2', 'OS3', 'OS4', 'OS6', 'OS7', 'OS8']]\ny3 = new_os_data_all['credit_ratio'] * 100\nclf3 = RandomForestRegressor(n_estimators=100)\nclf3.fit(X3, y3)\nclf3.predict(X3)\nmetrics.mean_squared_error(y3, clf3.predict(X3))\n\n\nX1 = X1.reset_index().drop('index', 1)\ny1 = y1.reset_index().drop('index', 1)\nX1_train = X1.loc[:30]\nX1_test = X1.loc[31:]\ny1_train = y1.loc[:30]\ny1_test = y1.loc[30:]\nclf4 = RandomForestRegressor(n_estimators=20)\nclf4.fit(X1_train, y1_train)\nclf4.predict(X1_test)\nmetrics.confusion_matrix(y1_test, clf4.predict(X1_test))\n\nfrom sklearn.ensemble import RandomForestClassifier\ny11 = new_cs_data_all['credit_rating'].apply(lambda x: 0 if x == 'A' else 1 if x == 'B' else 2 if x == 'C' else 2 if x == 'D' else 2)\ny11 = y11.reset_index().drop('index', 1)\ny11_train = y11.loc[:30]\ny11_test = y11.loc[30:]\nclf5 = RandomForestClassifier(n_estimators=10, random_state=10)\nclf5.fit(X1_train, y11_train)\nclf5.predict(X1_train)\nclf5.predict(X1_test)\nmetrics.confusion_matrix(y11_train, clf5.predict(X1_train))\nmetrics.confusion_matrix(y11_test, clf5.predict(X1_test))\nclf5.fit(X1, y11)\nclf5.predict(X1)\nmetrics.confusion_matrix(y11, clf5.predict(X1))\n\n\ny22 = new_hi_data_all['credit_rating'].apply(lambda x: 0 if x == 'A' else 1 if x == 'B' else 2 if x == 'C' else 2 if x == 'D' else 2)\nX2 = new_hi_data_all[['HI1', 'HI2', 'HI4', 'HI5', 'HI6']]\ny2 = new_hi_data_all['credit_ratio']\nX2 = X2.reset_index().drop('index', 1)\ny2 = y2.reset_index().drop('index', 1)\ny22 = y22.reset_index().drop('index', 1)\nX2_train = X2.loc[:150]\nX2_test = X2.loc[151:]\ny2_train = y2.loc[:150]\ny2_test = y2.loc[151:]\ny22_train = y22.loc[:150]\ny22_test = y22.loc[151:]\nclf2 = RandomForestRegressor(n_estimators=10, random_state=10)\nclf22 = RandomForestClassifier(n_estimators=10, random_state=10)\nclf2.fit(X2, y2)\nclf2.predict(X2)\nclf2.fit(X2_train, y2_train)\nclf2.predict(X2_train)\nclf2.predict(X2_test)\nclf22.fit(X2, y22)\nclf22.predict(X2)\nclf22.fit(X2_train, y22_train)\nclf22.predict(X2_train)\nclf22.predict(X2_test)\n\n\ny33 = new_os_data_all['credit_rating'].apply(lambda x: 0 if x == 'A' else 1 if x == 'B' else 2 if x == 'C' else 2 if x == 'D' else 2)\nX3 = new_os_data_all[['OS1', 'OS2', 'OS3', 'OS4', 'OS6', 'OS7', 'OS8']]\ny3 = new_os_data_all['credit_ratio']\nX3 = X3.reset_index().drop('index', 1)\ny3 = y3.reset_index().drop('index', 1)\ny33 = y33.reset_index().drop('index', 1)\nX3_train = X3.loc[:80]\nX3_test = X3.loc[81:]\ny3_train = y3.loc[:80]\ny3_test = y3.loc[81:]\ny33_train = y33.loc[:80]\ny33_test = y33.loc[81:]\nclf3 = RandomForestRegressor(n_estimators=10, random_state=10)\nclf33 = RandomForestClassifier(n_estimators=10, random_state=10)\nclf3.fit(X3, y3)\nclf3.predict(X3)\nclf3.fit(X3_train, y3_train)\nclf3.predict(X3_train)\nclf3.predict(X3_test)\nclf33.fit(X3, y33)\nclf33.predict(X3)\nclf33.fit(X3_train, y33_train)\nclf33.predict(X3_train)\nclf33.predict(X3_test)\n\n\n\n\n\n\n\n\n","sub_path":"2017_11_28.py","file_name":"2017_11_28.py","file_ext":"py","file_size_in_byte":6086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"463141186","text":"import matplotlib.pyplot as plt\nimport numpy as np\nprint(\"Marco....\")\nimport uncertainties.unumpy 
as unp\nfrom uncertainties.unumpy import (\n nominal_values as noms,\n std_devs as stds,\n)\nfrom table import (\n make_table,\n make_SI,\n write,\n)\nfrom uncertainties import ufloat\n\nt6, t12, t18, t24, t30, t36, t42, t48, t54, t60 = np.genfromtxt('build/t.txt', unpack=True)\nt = unp.uarray([np.mean(t6),np.mean(t12),np.mean(t18),np.mean(t24),np.mean(t30),np.mean(t36),np.mean(t42),np.mean(t48),np.mean(t54),np.mean(t60)],[np.std(t6),np.std(t12),np.std(t18),np.std(t24),np.std(t30),np.std(t36),np.std(t42),np.std(t48),np.std(t54),np.std(t60)])\na = 43.8*10**(-2)\nb = 0.1*10**(-2)\ns = unp.uarray([a],[b])\n\ndv6 = np.sqrt((np.std(t6)/np.mean(t6))**2+(b/a)**2)*(a/np.mean(t6))\nrv6 = (a)/np.mean(t6)\nv6 = unp.uarray(rv6, dv6)\n\ndv12 = np.sqrt((np.std(t12)/np.mean(t12))**2+(b/a)**2)*(a/np.mean(t12))\nrv12 = (a)/np.mean(t12)\nv12 = unp.uarray(rv12, dv12)\n\ndv18 = np.sqrt((np.std(t18)/np.mean(t18))**2+(b/a)**2)*(a/np.mean(t18))\nrv18 = (a)/np.mean(t18)\nv18 = unp.uarray(rv18, dv18)\n\ndv24 = np.sqrt((np.std(t24)/np.mean(t24))**2+(b/a)**2)*(a/np.mean(t24))\nrv24 = (a)/np.mean(t24)\nv24 = unp.uarray(rv24, dv24)\n\ndv30 = np.sqrt((np.std(t30)/np.mean(t30))**2+(b/a)**2)*(a/np.mean(t30))\nrv30 = (a)/np.mean(t30)\nv30 = unp.uarray(rv30, dv30)\n\ndv36 = np.sqrt((np.std(t36)/np.mean(t36))**2+(b/a)**2)*(a/np.mean(t36))\nrv36 = (a)/np.mean(t36)\nv36 = unp.uarray(rv36, dv36)\n\ndv42 = np.sqrt((np.std(t42)/np.mean(t42))**2+(b/a)**2)*(a/np.mean(t42))\nrv42 = (a)/np.mean(t42)\nv42 = unp.uarray(rv42, dv42)\n\ndv48 = np.sqrt((np.std(t48)/np.mean(t48))**2+(b/a)**2)*(a/np.mean(t48))\nrv48 = (a)/np.mean(t48)\nv48 = unp.uarray(rv48, dv48)\n\ndv54 = np.sqrt((np.std(t54)/np.mean(t54))**2+(b/a)**2)*(a/np.mean(t54))\nrv54 = (a)/np.mean(t54)\nv54 = unp.uarray(rv54, dv54)\n\ndv60 = np.sqrt((np.std(t60)/np.mean(t60))**2+(b/a)**2)*(a/np.mean(t60))\nrv60 = (a)/np.mean(t60)\nv60 = unp.uarray(rv60, dv60)\n\nrv = np.array([rv6, rv12, rv18, rv24, rv30, rv36, rv42, rv48, rv54, rv60])\ndv = np.array([dv6, dv12, dv18, dv24, dv30, dv36, dv42, dv48, dv54, dv60])\n\nv = unp.uarray(rv, dv)\n\nrv = rv * 10**(2)\ndv = dv * 10**(2)\ngang = np.array([6, 12, 18, 24, 30, 36, 42, 48, 54, 60])\nwrite('build/geschwtabelle.tex', make_table([gang, rv, dv], [1, 2, 2])) # cm/s\nrv = rv * 10**(-2)\ndv = dv * 10**(-2)\nnp.savetxt('build/geschw.txt', np.column_stack([rv, dv]), header=\"v [m/s], Fehler [m/s]\")\n\n\ns = np.genfromtxt('build/s.txt', unpack=True)\nrf_0 = np.mean(s)\nwrite('build/rf_0.tex', make_SI(rf_0, r'\\per\\second', figures=5))\ndf_0 = np.std(s)\nwrite('build/df_0.tex', make_SI(df_0, r'\\per\\second', figures=5))\nf_0 = ufloat(rf_0, df_0)\n\nwrite('build/f_0.tex', make_SI(f_0, r'\\per\\second', figures=5))\n\nd = np.genfromtxt('build/d.txt', unpack=True)\nwl = np.array([d[2]-d[0], d[3]-d[1], d[4]-d[2], d[5]-d[3]])\nrwl = np.mean(wl)\ndwl = np.std(wl)\nwl = ufloat(rwl*10**(3), dwl*10**(3))\n\ndc = np.sqrt((dwl/rwl)**2+(df_0/rf_0)**2)*(rwl*rf_0)\nrc = (rwl*rf_0)\nc = ufloat(rc, dc)\n\nwrite('build/wl.tex', make_SI(wl, r'\\milli\\metre', figures=1))\nwrite('build/einsdurchwl.tex', make_SI(1/wl, r'\\per\\milli\\metre', figures=1))\nwrite('build/c.tex', make_SI(c, r'\\metre\\per\\second', figures=3))\n\nr6, r12, r18, r24, r30, r36, r42 = np.genfromtxt('build/r.txt', unpack=True)\nv6, v12, v18, v24, v30, v36 = np.genfromtxt('build/v.txt', unpack=True)\nrr6 = np.mean(r6)\nrr12 = np.mean(r12)\n\n#r = unp.uarray([rr6 = np.mean(r6)],[])\n\n\nr = unp.uarray([np.mean(v36), np.mean(v30), np.mean(v24), np.mean(v18), np.mean(v12), np.mean(v6), 
np.mean(r6), np.mean(r12), np.mean(r18), np.mean(r24), np.mean(r30), np.mean(r36), np.mean(r42)],[np.std(v36), np.std(v30), np.std(v24), np.std(v18), np.std(v12), np.std(v6), np.std(r6), np.std(r12), np.std(r18), np.std(r24), np.std(r30), np.std(r36), np.std(r42)])\nr = r*10\nwrite('build/test.tex', make_table([r], [2, 2]))\ndr = r - f_0\n#write('build/test.tex', make_SI(dr, r'\\milli\\metre', figures=1))\nrv = unp.uarray([rv36, rv30, rv24, rv18, rv12, rv6, -rv6, -rv12, -rv18, -rv24, -rv30, -rv36, -rv42], [dv36, dv30, dv24, dv18, dv12, dv6, dv6, dv12, dv18, dv24, dv30, dv36, dv42])\nrv = rv* 10**(2)\n\nz1 = np.array([rv36, rv30, rv24, rv18, rv12, rv6, -rv6, -rv12, -rv18, -rv24, -rv30, -rv36, -rv42])\nz2 = np.array([dv36, dv30, dv24, dv18, dv12, dv6, dv6, dv12, dv18, dv24, dv30, dv36, dv42]) # Das ist in m/s\nz1=z1 * 10**(2)\nz2=z2 * 10**(2) #jetzt in cm/s\n\nfrom scipy.optimize import curve_fit\n\ndef h(x, m, b):\n return m*x + b\nplt.errorbar(z1, unp.nominal_values(dr), xerr=z2, yerr=unp.std_devs(dr), fmt='r.', label=r'$\\text{Messwerte} \\; \\increment f')\nparameter, covariance = curve_fit(h, z1, unp.nominal_values(dr))\nx_plot = np.linspace(-36, 30, 10000)\n\nplt.plot(x_plot, h(x_plot, parameter[0], parameter[1]), 'b-', label=r'Ausgleichskurve', linewidth=1)\nfehler = np.sqrt(np.diag(covariance)) # Diagonalelemente der Kovarianzmatrix stellen Varianzen dar\n\nm_fit = ufloat(parameter[0], fehler[0])\nb_fit = ufloat(parameter[1], fehler[1])\n\nwrite('build/propfak_1.tex', make_SI(m_fit, r'\\per\\Square\\metre', figures=1))\n#write('build/fit_1_b.tex', make_SI(b_fit*1000, r'\\nothing\\tothe{-3}', figures=1))\nplt.ylabel(r'$\\increment f / s^{-1}$')\nplt.xlabel(r'$v \\ /\\ cm/s^{-1}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08) # Diese Zeile bitte in Zukunft nicht vergessen sonst unschön! <--- Du hast sie wieder raus genommen!!! >.<\nplt.savefig('build/1plot.pdf')\n\nplt.clf()\n\n\n\nwrite('build/divtabelle_1.tex', make_table([rv, dr], [2, 2 ,2 ,2])) # wird angezeigt in v [cm/s], delta v [cm/s], diff f [1/s], Fehler diff f [1/s]\n\ni6, i12, i18, i24, i30 = np.genfromtxt('build/i.txt', unpack=True)\ni = unp.uarray([np.mean(i30), np.mean(i24), np.mean(i18), np.mean(i12), np.mean(i6)], [np.std(i30), np.std(i24), np.std(i18), np.std(i12), np.std(i6)])\nrv = unp.uarray([rv30, rv24, rv18, rv12, rv6], [dv30, dv24, dv18, dv12, dv6])\nrv = rv* 10**(2)\ni = i*5\n\ndef h(x, m, b):\n return m*x + b\nplt.errorbar(unp.nominal_values(rv), unp.nominal_values(i), xerr=unp.std_devs(rv), yerr=unp.std_devs(i), fmt='r.', label=r'$\\text{Messwerte} \\; \\increment f$')\nparameter, covariance = curve_fit(h, unp.nominal_values(rv), unp.nominal_values(i))\nx_plot = np.linspace(5, 26, 10000)\n\nplt.plot(x_plot, h(x_plot, parameter[0], parameter[1]), 'b-', label=r'Ausgleichskurve', linewidth=1)\nfehler = np.sqrt(np.diag(covariance)) # Diagonalelemente der Kovarianzmatrix stellen Varianzen dar\n\nm_fit = ufloat(parameter[0], fehler[0])\nb_fit = ufloat(parameter[1], fehler[1])\n\nwrite('build/propfak_2.tex', make_SI(m_fit, r'\\per\\Square\\metre', figures=1))\n#write('build/fit_1_m.tex', make_SI(m_fit*1000, r'\\nothing\\tothe{-3}', figures=1))\n#write('build/fit_1_b.tex', make_SI(b_fit*1000, r'\\nothing\\tothe{-3}', figures=1))\nplt.ylabel(r'$\\increment f / s^{-1}$')\nplt.xlabel(r'$v \\ /\\ cm/s^{-1}$')\nplt.legend(loc='best')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08) # Diese Zeile bitte in Zukunft nicht vergessen sonst unschön! <--- Du hast sie wieder raus genommen!!! 
>.<\nplt.savefig('build/2plot.pdf')\n\nwrite('build/divtabelle_2.tex', make_table([rv, i], [2, 2 ,1 ,1])) # wird angezeigt in v [cm/s], delta v [cm/s], diff f [1/s], Fehler diff f [1/s]\n\n#write('build/rv6.tex', make_SI(rv6, r'\\metre\\per\\second', figures=5))\n#write('build/dv6.tex', make_SI(dv6, r'\\metre\\per\\second', figures=5))\n#write('build/v6.tex', make_SI(v6, r'\\metre\\per\\second', figures=5))\n\n\n## Beispielplot\n#x = np.linspace(0, 10, 1000)\n#y = x ** np.sin(x)\n#plt.plot(x, y, label='Kurve')\n#plt.xlabel(r'$\\alpha \\:/\\: \\si{\\ohm}$')\n#plt.ylabel(r'$y \\:/\\: \\si{\\micro\\joule}$')\n#plt.legend(loc='best')\n#\n## in matplotlibrc leider (noch) nicht möglich\n#plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\n#plt.savefig('build/plot.pdf')\n#\n#\n## Beispieltabelle\n#a = np.linspace(1, 10, 10)\n#b = np.linspace(11, 20, 10)\n#write('build/tabelle.tex', make_table([a, b], [4, 2])) # [4,2] = Nachkommastellen\n#\n#\n## Beispielwerte\n#\n#\n#c = ufloat(0, 0)\n#write('build/wert_a.tex', make_SI(c*1e3, r'\\joule\\per\\kelvin\\per\\gram' ))\nprint(\"....Polo\")\n","sub_path":"zeugs/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":8040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28896876","text":"import numpy as np\nimport cv2\nimport csv\nimport os\n\nim = cv2.imread(\"image1.jpg\")\n#xmin xmax ymin ymax\n#5.66024780e-02\n#3.23323309e-02\n#8.58804226e-01\n#3.19813311e-01\n\n\nwith open('illegal.csv', mode='w') as illegalFile:\n fieldnames = ['image_name', 'illegal']\n writer = csv.DictWriter(illegalFile, fieldnames=fieldnames)\n writer.writeheader()\n\n # detect bike\n weight = 593\n height = 645\n ymin = 2.6492548e-01* weight\n xmin = 5.2693754e-01* height\n ymax = 7.4511099e-01* weight\n xmax = 1.0000000e+00* height\n\n x = int(xmin)\n y = int(ymin)\n w = int(xmax - xmin)\n h = int(ymax - ymin)\n\n # detect non helmet\n yymin = 2.3673776e-01 * weight\n xxmin = 6.1415607e-01 * height\n yymax = 3.2613114e-01 * weight\n xxmax = 6.9770998e-01 * height\n\n # license\n #yymin = 4.6882585e-01 * weight\n #xxmin = 9.4870925e-03 * height\n #yymax = 5.7634401e-01 * weight\n #xxmax = 2.8340721e-01 * height\n\n xx = int(xxmin)\n yy = int(yymin)\n ww = int(xxmax - xxmin)\n hh = int(yymax - yymin)\n\n # box motocycle\n cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n x1 = max(xmin, xxmin)\n y1 = max(ymin, yymin)\n x2 = min(xmax, xxmax)\n y2 = min(ymax, yymax)\n print(x1)\n print(x2)\n\n # w y w h for overlap\n ox = int(x2)\n oy = int(y2)\n ow = int(x1 - x2)\n oh = int(y1 - y2)\n\n # box helmet\n # cv2.rectangle(im, (xx, yy), (xx + ww, yy + hh), (0, 0, 255), 2)\n\n # check overlap\n cv2.rectangle(im, (ox, oy), (ox + ow, oy + oh), (0, 255, 255), 2)\n if((xmax > x2 and xmin < x1) and (ymax > y2 and ymin < y2)):\n writer.writerow({'image_name': 'image1.jpg', 'illegal': 'Yes'}) \n print(\"illegal for non wearing helmet\")\n else:\n print(\"not illegal\")\n \n crop_img = im[y:y+h, x:x+w]\n cv2.imshow(\"cropped{}\".format('1'),im)\n # cv2.imshow(\"cropped\", im)\n PATH_TO_TEST_IMAGES_DIR = \"./ImageIllegal\"\n cv2.imwrite( os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(1)), crop_img )\n # cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 2)\n # cv2.imshow('bwmask', im)\n\n","sub_path":"detecting.py","file_name":"detecting.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"42643518","text":"import os\nimport random\n\nimport numpy as np\n\nimport keras\nfrom keras.utils import np_utils\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, Conv2D, MaxPooling2D\n\n\nfrom imageio import imread, imsave\n\n\ndef load_stuff():\n\n base_path = 'test_pos'\n images = []\n for n in range(100):\n fpath = os.path.join(base_path, 'section{}.png'.format(n))\n images.append(imread(fpath))\n base_path = 'test_neg'\n for n in range(100):\n fpath = os.path.join(base_path, 'section{}.png'.format(n))\n images.append(imread(fpath))\n stack = np.transpose(images, (0, 1, 2))\n n_images, xdim, ydim = stack.shape\n x_test = stack.reshape(n_images, xdim, ydim, 1)\n x_test = x_test.astype('float32')\n x_test /= 255\n y_test = [1] * 100 + [0] * 100\n y_test = np_utils.to_categorical(y_test)\n\n train_size = 100\n # Channels last\n base_path = 'positive_examples'\n images = []\n for n in range(train_size):\n fpath = os.path.join(base_path, 'section{}.png'.format(n))\n images.append(imread(fpath))\n base_path = 'negative_examples'\n for n in range(train_size):\n fpath = os.path.join(base_path, 'section{}.png'.format(n))\n images.append(imread(fpath))\n stack = np.transpose(images, (0, 1, 2))\n\n n_images, xdim, ydim = stack.shape\n\n # Channels last\n x_train = stack.reshape(n_images, xdim, ydim, 1)\n x_train = x_train.astype('float32')\n x_train /= 255\n\n y_train = [1] * train_size + [0] * train_size\n\n y_train = np_utils.to_categorical(y_train)\n print(y_train)\n sys.exit(0)\n\n model = Sequential()\n\n model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(51, 51, 1)))\n model.add(Convolution2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1)\n\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n model.save('my_model.h5')\n\n\ndef load_data_from_disk_images(base_path):\n \"\"\"Load images found at base_path and convert into x_train and y_train\n arrays suitable for training keras models.\"\"\"\n\n wall_root = os.path.join(base_path, \"wall\")\n wall_filenames = os.listdir(wall_root)\n wall_fpaths_labels = [\n (os.path.join(wall_root, fn), 1)\n for fn in wall_filenames\n ]\n not_wall_root = os.path.join(base_path, \"not_wall\")\n wall_filenames = os.listdir(not_wall_root)\n not_wall_fpaths_labels = [\n (os.path.join(not_wall_root, fn), 0)\n for fn in wall_filenames\n ]\n\n fpaths_labels = wall_fpaths_labels + not_wall_fpaths_labels\n random.shuffle(fpaths_labels)\n\n images = []\n labels = []\n for fpath, label in fpaths_labels:\n images.append(imread(fpath))\n labels.append(label)\n stack = np.transpose(images, (0, 1, 2))\n n_images, xdim, ydim = stack.shape\n\n # Channels last\n x_train = stack.reshape(n_images, xdim, ydim, 1)\n x_train = x_train.astype('float32')\n x_train /= 255\n\n y_train = np.array(labels)\n y_train = np_utils.to_categorical(y_train)\n\n return x_train, y_train\n\ndef train_model_model():\n\n x_train, y_train = load_data_from_disk_images(\"data/train\")\n x_test, y_test = load_data_from_disk_images(\"data/test\")\n\n 
inputs = Input((51, 51, 1))\n conv = Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)\n flatten = Flatten()(conv)\n dense1 = Dense(128, activation='relu')(flatten)\n dense2 = Dense(2, activation='softmax')(dense1)\n\n model = Model(input=inputs, output=dense2)\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1)\n\n\ndef train_model():\n\n x_train, y_train = load_data_from_disk_images(\"data/train\")\n x_test, y_test = load_data_from_disk_images(\"data/test\")\n\n model = Sequential()\n\n model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(51, 51, 1)))\n model.add(Convolution2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.fit(x_train, y_train, batch_size=32, epochs=10, verbose=1)\n\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\n model.save('my_model.h5')\n\n\ndef main():\n # load_stuff()\n\n train_model_model()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97898472","text":"def read_lst(name):\n with open(name, 'r') as f:\n lst = f.readline().strip().split(\",\")\n return list(map(int, lst))\n\n\ndef rev_list(lst, index, length):\n rev = lst[:]\n for i in range(length):\n rev[(index + i) % len(lst)] = lst[(index + length - i - 1) % len(lst)]\n return rev\n\n\ndef twist(inps, lst=list(range(256)), index=0, skip=0):\n for length in inps:\n lst = rev_list(lst, index, length)\n index = (index + length + skip) % len(lst)\n skip += 1\n return lst, index, skip\n\n\ndef hash(inps, iters=64):\n inps = list(map(ord, inps)) + [17, 31, 73, 47, 23]\n\n lst, index, skip = twist(inps)\n\n for _ in range(iters-1):\n lst, index, skip = twist(inps, lst, index, skip)\n\n final = \"\"\n for i in range(16):\n sub = lst[i * 16:(i + 1) * 16]\n\n h = sub[0]\n for c in sub[1:]:\n h = h ^ c\n final += \"{:02x}\".format(h)\n\n return final\n\n\nif __name__ == \"__main__\":\n inps = read_lst('10.txt')\n\n ##\n ## Part 1\n ##\n (lst, idx, skip) = twist(inps)\n print(lst[0] * lst[1])\n\n\n ##\n ## Part 2\n ##\n print(hash(\",\".join(map(str, inps))))\n\n","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"629757942","text":"# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2021 Megvii Inc. 
All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport argparse\nimport time\n\n# pylint: disable=import-error\nimport model as snet_model\n\nimport megengine\nimport megengine.data as data\nimport megengine.data.transform as T\nimport megengine.distributed as dist\nimport megengine.functional as F\n\nlogging = megengine.logger.get_logger()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"MegEngine ImageNet Training\")\n parser.add_argument(\"-d\", \"--data\", metavar=\"DIR\", help=\"path to imagenet dataset\")\n parser.add_argument(\n \"-a\",\n \"--arch\",\n default=\"shufflenet_v2_x1_0\",\n help=\"model architecture (default: shufflenet_v2_x1_0)\",\n )\n parser.add_argument(\n \"-n\",\n \"--ngpus\",\n default=None,\n type=int,\n help=\"number of GPUs per node (default: None, use all available GPUs)\",\n )\n parser.add_argument(\n \"-m\", \"--model\", metavar=\"PKL\", default=None, help=\"path to model checkpoint\"\n )\n\n parser.add_argument(\"-j\", \"--workers\", default=2, type=int)\n parser.add_argument(\n \"-p\",\n \"--print-freq\",\n default=20,\n type=int,\n metavar=\"N\",\n help=\"print frequency (default: 10)\",\n )\n\n parser.add_argument(\"--dist-addr\", default=\"localhost\")\n parser.add_argument(\"--dist-port\", default=23456, type=int)\n parser.add_argument(\"--world-size\", default=1, type=int)\n parser.add_argument(\"--rank\", default=0, type=int)\n\n args = parser.parse_args()\n\n if args.ngpus is None:\n args.ngpus = dist.helper.get_device_count_by_fork(\"gpu\")\n\n if args.world_size * args.ngpus > 1:\n dist_worker = dist.launcher(\n master_ip=args.dist_addr,\n port=args.dist_port,\n world_size=args.world_size * args.ngpus,\n rank_start=args.rank * args.ngpus,\n n_gpus=args.ngpus\n )(worker)\n dist_worker(args)\n else:\n worker(args)\n\n\ndef worker(args):\n # build dataset\n _, valid_dataloader = build_dataset(args)\n\n # build model\n model = snet_model.__dict__[args.arch](pretrained=args.model is None)\n if args.model is not None:\n logging.info(\"load from checkpoint %s\", args.model)\n checkpoint = megengine.load(args.model)\n if \"state_dict\" in checkpoint:\n state_dict = checkpoint[\"state_dict\"]\n model.load_state_dict(state_dict)\n\n def valid_step(image, label):\n logits = model(image)\n loss = F.nn.cross_entropy(logits, label)\n acc1, acc5 = F.topk_accuracy(logits, label, topk=(1, 5))\n # calculate mean values\n if dist.get_world_size() > 1:\n loss = F.distributed.all_reduce_sum(loss) / dist.get_world_size()\n acc1 = F.distributed.all_reduce_sum(acc1) / dist.get_world_size()\n acc5 = F.distributed.all_reduce_sum(acc5) / dist.get_world_size()\n return loss, acc1, acc5\n\n model.eval()\n _, valid_acc1, valid_acc5 = valid(valid_step, valid_dataloader, args)\n logging.info(\n \"Test Acc@1 %.3f, Acc@5 %.3f\",\n valid_acc1,\n valid_acc5,\n )\n\n\ndef valid(func, data_queue, args):\n objs = AverageMeter(\"Loss\")\n top1 = AverageMeter(\"Acc@1\")\n top5 = AverageMeter(\"Acc@5\")\n clck = AverageMeter(\"Time\")\n\n t = time.time()\n for step, (image, label) in enumerate(data_queue):\n image = megengine.tensor(image, dtype=\"float32\")\n label = megengine.tensor(label, dtype=\"int32\")\n\n n = image.shape[0]\n\n loss, acc1, acc5 = func(image, label)\n\n objs.update(loss.item(), n)\n top1.update(100 * acc1.item(), n)\n top5.update(100 * acc5.item(), n)\n 
clck.update(time.time() - t, n)\n t = time.time()\n\n if step % args.print_freq == 0 and dist.get_rank() == 0:\n logging.info(\"Test step %d, %s %s %s %s\", step, objs, top1, top5, clck)\n\n return objs.avg, top1.avg, top5.avg\n\n\ndef build_dataset(args):\n train_dataloader = None\n valid_dataset = data.dataset.ImageNet(args.data, train=False)\n valid_sampler = data.SequentialSampler(\n valid_dataset, batch_size=100, drop_last=False\n )\n valid_dataloader = data.DataLoader(\n valid_dataset,\n sampler=valid_sampler,\n transform=T.Compose(\n [\n T.Resize(256),\n T.CenterCrop(224),\n T.Normalize(\n mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395]\n ), # BGR\n T.ToMode(\"CHW\"),\n ]\n ),\n num_workers=args.workers,\n )\n return train_dataloader, valid_dataloader\n\n\nclass AverageMeter:\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=\":.3f\"):\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"official/vision/classification/shufflenet/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"641483136","text":"\n\"\"\"Erlang External Term Format serializer/deserializer\"\"\"\n\n__version__ = \"2.0.0\"\n__license__ = \"BSD\"\n\nfrom erlterm.codec import ErlangTermDecoder, ErlangTermEncoder, ErlangStrDecoder\nfrom erlterm.types import *\n\nencode = ErlangTermEncoder().encode\ndecode = ErlangTermDecoder().decode\ndecode_from_str = ErlangStrDecoder().parse\n# import struct\n# import sys\n# def mailbox_gen():\n# while True:\n# len_bin = sys.stdin.buffer.read(4)\n# if len(len_bin) != 4: return None\n# (length,) = struct.unpack('!I',len_bin)\n# yield decode(sys.stdin.buffer.read(length))\n# def port_gen():\n# while True:\n# term = encode((yield))\n# sys.stdout.buffer.write(struct.pack('!I',len(term)))\n# sys.stdout.buffer.write(term)\n# def port_connection():\n# port = port_gen()\n# next(port)\n# return mailbox_gen(),port\n","sub_path":"erlterm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"520482043","text":"#-*- coding: utf-8 -*-\n\nfrom urllib import request\nimport json\nimport re\nfrom bs4 import BeautifulSoup\nimport testutil\nimport sys\nimport python_rdb as db\n\nclass Subway():\n def setData(self, tup):\n self.id, self.station_cd, self.station_nm, self.line_num = tup\n def __repr__(self):\n return self.station_nm\n def __str__(self):\n return self.station_nm\n\n\n#python_rdb.makeTable()\n\ndef insertSubwayListToDB():\n for i in range(1,10):\n subwayListQuery = testutil.SubwayListData()\n subwayListQuery.setLineNum(i)\n\n u = request.urlopen(subwayListQuery.makeQueryURL())\n data = u.read()\n encoding = u.info().get_content_charset('utf-8')\n\n json_data = json.loads(data.decode(encoding))\n stationList = json_data[subwayListQuery.service_name]['row']\n # db.insertSubwayList(stationList)\n\ndef getShortestRoute(origin,dest):\n shortestRouteQuery = testutil.SubwayShortestRouteData()\n 
shortestRouteQuery.setStationNames(origin,dest)\n\n u = request.urlopen(shortestRouteQuery.makeQueryURL())\n data = u.read()\n encoding = u.info().get_content_charset('utf-8')\n json_data = json.loads(data.decode(encoding))\n print(json_data)\n# shortestRoute = json_data[subwayListQuery.service_name]['row']v\n\nsubway_list = []\ni = 0\nfor tup in db.getSubwayList():\n s = Subway()\n s.setData(tup)\n subway_list.append(s)\n i+=1\n \norigin = subway_list[2]\ndest = subway_list[20]\n\nprint(origin)\nprint(dest)\nprint(i)\n\ngetShortestRoute(subway_list[2].station_nm,subway_list[420].station_nm)\n\n#print(str(data,\"utf-8\"))\n","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"155909449","text":"from modeller import *\nfrom modeller.automodel import * # Load the automodel class\n\nlog.verbose()\nenv = environ()\n\n# directories for input atom files\nenv.io.atom_files_directory = ['.', '../atom_files']\n\nenv.io.hetatm = True\nenv.io.water = True\n\nclass MyModel(automodel):\n def special_restraints(self, aln):\n rsr = self.restraints\n at = self.atoms\n\n# Residues 510 through 513 should be an alpha helix:\n rsr.add(secondary_structure.alpha(self.residue_range('506:', '513:')))\n\n\na = MyModel(env, alnfile = 'alignment.ali',\n knowns = '3tt1_chainA_proteinintsolv', sequence = 'LeuT_fill')\na.starting_model= 1\na.ending_model = 1\n\n#a.md_level = None # No refinement of model\n\na.make()\n\n","sub_path":"old/3tt1/setup/fill_residue.py","file_name":"fill_residue.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"23490295","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# Copyright 2014 Techdealer\n\n##############BIBLIOTECAS A IMPORTAR E DEFINICOES####################\nimport urllib,urllib2,re,xbmcplugin,xbmcgui,sys,xbmc,xbmcaddon,xbmcvfs,socket,HTMLParser\nimport json\nh = HTMLParser.HTMLParser()\n\naddon_id = 'plugin.video.replaypt'\nselfAddon = xbmcaddon.Addon(id=addon_id)\naddonfolder = selfAddon.getAddonInfo('path')\nartfolder = '/resources/img/'\n\nwebdocumentarios_url = 'http://www.webdocumentarios.com/'\n##################################################\n\ndef CATEGORIES_webdocumentarios():\n\taddDir('[B]Mudar para categorias[/B]',webdocumentarios_url,440,addonfolder+artfolder+'webdocumentarios.png',True)\n\tlistar_episodios(webdocumentarios_url)\n\ndef alterar_vista(url):\n\taddDir('[B]Mudar para últimas[/B]',url,437,addonfolder+artfolder+'webdocumentarios.png');\n\ttry:\n\t\tcodigo_fonte = abrir_url(url)\n\texcept:\n\t\tcodigo_fonte = ''\n\tif codigo_fonte:\n\t\tmatch = re.search('
<div class=\"widget\"><h3>Categorias</h3>(.+?)</div>', codigo_fonte, re.DOTALL)\n\tif match:\n\t\t\taddDir('[COLOR blue][B]Categorias[/B][/COLOR]','',437,addonfolder+artfolder+'webdocumentarios.png',False)\n\t\t\tmatch_2 = re.findall('<a href=\"(.+?)\">(.+?)</a>', match.group(1))\n\t\t\tfor link, name in match_2:\n\t\t\t\taddDir(h.unescape(name).encode('utf-8'),link,438,addonfolder+artfolder+'webdocumentarios.png')\n\t\tmatch = re.search('<div class=\"widget\"><h3>Tags</h3>(.+?)</div>', codigo_fonte, re.DOTALL)\n\tif match:\n\t\t\taddDir('[COLOR blue][B]Tags[/B][/COLOR]','',437,addonfolder+artfolder+'webdocumentarios.png',False)\n\t\t\tmatch_2 = re.findall('<a href=\"(.+?)\">(.+?)</a>', match.group(1))\n\t\t\tfor link, name in match_2:\n\t\t\t\taddDir(name,link,438,addonfolder+artfolder+'webdocumentarios.png')\n\ndef listar_episodios(url):\n\ttry:\n\t\tcodigo_fonte = abrir_url(url)\n\texcept:\n\t\tcodigo_fonte = ''\n\tif codigo_fonte:\n\t\tmatch = re.findall('<div class=\"post\">.*?<img src=\"(.+?)\" />.*?<a href=\"(.+?)\">(.+?)</a>.*?</div>', codigo_fonte, re.DOTALL)\n\t\tfor iconimage, link, name in match:\n\t\t\taddDir(name,link,439,iconimage,False)\n\t\thtml_pagination = re.search('<a href=\"(.+?)\">\n»\n</a>', codigo_fonte)\n\t\tif html_pagination:\n\t\t\taddDir('[B]Próxima >>[/B]',html_pagination.group(1),438,addonfolder+artfolder+'webdocumentarios.png')\n\ndef procurar_fontes(url,name,iconimage):\n\tprogress = xbmcgui.DialogProgress()\n\tprogress.create('Replay PT', 'Procurando fontes...')\n\tprogress.update(0)\n\ttry:\n\t\tcodigo_fonte = abrir_url(url)\n\texcept:\n\t\tcodigo_fonte = ''\n\tif codigo_fonte:\n\t\tjwplayer = re.search(\"[^_]file:'(.+?)',\", codigo_fonte)\n\t\tif jwplayer:\n\t\t\tif jwplayer.group(1).find('youtube') > -1:\n\t\t\t\tvideo_url = youtube_resolver(jwplayer.group(1))\n\t\t\telse:\n\t\t\t\tvideo_url = jwplayer.group(1)\n\t\tif progress.iscanceled():\n\t\t\tsys.exit(0)\n\t\tprogress.update(100)\n\t\tprogress.close()\n\t\tif video_url:\n\t\t\tif video_url != 'youtube_nao resolvido':\n\t\t\t\tlistitem = xbmcgui.ListItem(label=name, iconImage=str(iconimage), thumbnailImage=str(iconimage), path=url)\n\t\t\t\tlistitem.setProperty('IsPlayable', 'true')\n\t\t\t\ttry:\n\t\t\t\t\txbmc.Player().play(item=video_url, listitem=listitem)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tdialog = xbmcgui.Dialog()\n\t\t\t\tok = dialog.ok('Replay PT', 'Erro ao resolver youtube...')\n\t\telse:\n\t\t\tdialog = xbmcgui.Dialog()\n\t\t\tok = dialog.ok('Replay PT', 'Nenhuma fonte suportada encontrada...')\n\ndef youtube_resolver(url):\n\tmatch = re.compile('.*?youtube.com/embed/([^?\"]+).*?').findall(url)\n\tif match:\n\t\treturn 'plugin://plugin.video.youtube/?action=play_video&videoid=' + str(match[0])\n\tmatch = re.compile('.*?youtube.com/watch\\?v=([^&\"]+).*?').findall(url)\n\tif match:\n\t\treturn 'plugin://plugin.video.youtube/?action=play_video&videoid=' + str(match[0])\n\treturn 'youtube_nao resolvido'\n\t\t\t\t\t\t\n############################################################################################################################\n\ndef abrir_url(url):\n\treq = urllib2.Request(url)\n\treq.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')\n\tresponse = urllib2.urlopen(req)\n\tlink=response.read()\n\tresponse.close()\n\treturn link\n\ndef addDir(name,url,mode,iconimage,pasta=True):\n u=sys.argv[0]+\"?url=\"+urllib.quote_plus(url)+\"&mode=\"+str(mode)+\"&name=\"+urllib.quote_plus(name)+\"&iconimage=\"+urllib.quote_plus(iconimage)\n ok=True\n liz=xbmcgui.ListItem(name, iconImage=\"DefaultFolder.png\", thumbnailImage=iconimage)\n ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=pasta)\n return ok\n \n############################################################################################################\n \ndef get_params():\n param=[]\n paramstring=sys.argv[2]\n if len(paramstring)>=2:\n params=sys.argv[2]\n cleanedparams=params.replace('?','')\n if (params[len(params)-1]=='/'):\n params=params[0:len(params)-2]\n pairsofparams=cleanedparams.split('&')\n param={}\n for i in range(len(pairsofparams)):\n splitparams={}\n splitparams=pairsofparams[i].split('=')\n if (len(splitparams))==2:\n param[splitparams[0]]=splitparams[1]\n \n return param\n \nparams=get_params()\nurl=None\nname=None\nmode=None\niconimage=None\n\ntry:\n url=urllib.unquote_plus(params[\"url\"])\nexcept:\n pass\ntry:\n name=urllib.unquote_plus(params[\"name\"])\nexcept:\n pass\ntry:\n mode=int(params[\"mode\"])\nexcept:\n pass\ntry: \n iconimage=urllib.unquote_plus(params[\"iconimage\"])\nexcept:\n pass\n\n#print \"Mode: \"+str(mode)\n#print \"URL: \"+str(url)\n#print \"Name: \"+str(name)\n#print 
\"Iconimage: \"+str(iconimage)","sub_path":"plugin.video.replaypt/webdocumentarios.py","file_name":"webdocumentarios.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"246650596","text":"import torch\nimport string, re\nimport torchtext\nfrom torchtext.data import Field, Iterator, TabularDataset\n\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torchkeras\n\nMAX_WORDS = 10000 # 仅考虑最高频的10000个词\nMAX_LEN = 200 # 每个样本保留200个词的长度\nBATCH_SIZE = 20\n\n\n# 过滤点低频词\ndef filter_low_freq_words(arr, vocab):\n arr = [[x if x < MAX_WORDS else 0 for x in example] for example in arr]\n return arr\n\n\n# 1.定义各个字段的预处理方法\nTEXT = Field(\n sequential=True,\n tokenize=lambda x: re.sub('[%s]' % string.punctuation, \"\", x).split(\" \"),\n lower=True,\n fix_length=MAX_LEN,\n postprocessing=filter_low_freq_words)\nLABEL = Field(sequential=False, use_vocab=False)\n\n# 2.构建表格型dataset\nds_train, ds_test = TabularDataset.splits(path='./data/',\n train='train.tsv',\n test='test.tsv',\n format='tsv',\n fields=[('label', LABEL),\n ('text', TEXT)],\n skip_header=False)\n\n# 3.构建词典\nTEXT.build_vocab(ds_train)\n\n# 4.构建数据管道迭代器\ntrain_iter, test_iter = Iterator.splits((ds_train, ds_test),\n sort_within_batch=True,\n sort_key=lambda x: len(x.text),\n batch_sizes=(BATCH_SIZE, BATCH_SIZE),\n device='cuda:4')\n\n\n# 将数据管道组织成torch.utils.data.DataLoader相似的features,label输出形式\nclass DataLoader:\n def __init__(self, data_iter):\n self.data_iter = data_iter\n self.length = len(data_iter)\n\n def __len__(self):\n return self.length\n\n def __iter__(self):\n # 注意:此处调整features为 batch first,并调整label的shape和dtype\n for batch in self.data_iter:\n yield (torch.transpose(batch.text, 0, 1),\n torch.unsqueeze(batch.label.float(), dim=1))\n\n\ndl_train = DataLoader(train_iter)\ndl_test = DataLoader(test_iter)\n\ntorch.random.seed()\n\n\nclass ScaledDotProductAttention(nn.Module):\n def __init__(self, temperature, attn_dropout=0.1):\n super(ScaledDotProductAttention, self).__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n\n def forward(self, q, k, v, mask=None):\n # matmul and scale\n attn = torch.matmul(q / self.temperature, k.transpose(2, 3))\n\n # mask\n if mask is not None:\n attn = attn.masked_fill(mask == 0, -1e9)\n\n # softmax\n attn = self.dropout(F.softmax(attn, dim=-1))\n\n # matmul\n output = torch.matmul(attn, v)\n\n return output, attn\n\n\nclass MultiheadAttention(nn.Module):\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super(MultiheadAttention, self).__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)\n self.fc = nn.Linear(n_head * d_v, d_model, bias=False)\n\n self.attention = ScaledDotProductAttention(temperature=d_k**0.5)\n\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n def forward(self, q, k, v, mask=None):\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)\n\n residual = q\n q = self.layer_norm(q)\n\n # pass through the pre-attention projection: b x lg x (n*dv)\n # seperate different heads: b x lg x n x dv\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_qs(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_qs(v).view(sz_b, len_v, n_head, 
d_v)\n\n # transpose for attention dor product: b x n x lg x dv\n q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1) # for head axis broadcasting\n\n q, attn = self.attention(q, k, v, mask=mask)\n\n # transpose to move the head dimension back: b x lq x n x dv\n # combine the last two dimensions to concatenate all the heads together:b x lq x (n*dv)\n q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)\n q = self.dropout(self.fc(q))\n q += residual\n\n return q, attn\n\n\nclass BiLSTM_Attention(torchkeras.Model):\n def __init__(self):\n super(BiLSTM_Attention, self).__init__()\n self.embedding = nn.Embedding(MAX_WORDS, 100, 1)\n self.lstm = nn.LSTM(input_size=100,\n hidden_size=128,\n num_layers=1,\n batch_first=True,\n bidirectional=True)\n # self.attention = MultiheadAttention(8, 256, 64, 64)\n self.dropout = nn.Dropout(p=0.5)\n self.dense = nn.Sequential()\n self.dense.add_module(\"linear\", nn.Linear(128 * 2, 1))\n self.dense.add_module(\"sigmoid\", nn.Sigmoid())\n\n def attention_net(self, lstm_output, final_state):\n hidden = final_state.view(-1, 128 * 2, 1)\n attn_weights = torch.bmm(lstm_output, hidden).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n context = torch.bmm(lstm_output.transpose(1, 2),\n soft_attn_weights.unsqueeze(2)).squeeze(2)\n\n return context\n\n def forward(self, x):\n x = self.embedding(x)\n x, (final_hidden_state, final_cell_state) = self.lstm(x)\n x = self.dropout(x)\n x = self.attention_net(x, final_hidden_state)\n y = self.dense(x)\n return y\n\n\nmodel = BiLSTM_Attention()\n\n\n# 准确率\ndef accuracy(y_pred, y_true):\n y_pred = torch.where(y_pred > 0.5,\n torch.ones_like(y_pred, dtype=torch.float32),\n torch.zeros_like(y_pred, dtype=torch.float32))\n acc = torch.mean(1 - torch.abs(y_true - y_pred))\n return acc\n\n\nmodel.to('cuda:4')\n\nmodel.compile(loss_func=nn.BCELoss(),\n optimizer=torch.optim.Adagrad(model.parameters(), lr=0.02),\n metrics_dict={\"accuracy\": accuracy},\n device='cuda:4')\n\n# 有时候模型训练过程中不收敛,需要多试几次\ndfhistory = model.fit(20, dl_train, dl_val=dl_test, log_step_freq=200)","sub_path":"bilstm_attn.py","file_name":"bilstm_attn.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"85158037","text":"\r\nfrom sklearn.ensemble import GradientBoostingRegressor as GBR\r\nfrom MLalgorithms._Regression import Regression\r\n\r\n\r\nclass GradientBoostingRegressor(Regression):\r\n\t\r\n\tdef fit(self, X, y, sample_weight=None, monitor=None):\r\n\t\treturn self.model.fit(sample_weight=sample_weight,\r\n\t\t\tX=X,\r\n\t\t\tmonitor=monitor,\r\n\t\t\ty=y)\r\n\r\n\tdef predict(self, X):\r\n\t\treturn self.model.predict(X=X)\r\n\r\n\tdef __init__(self, loss='ls', learning_rate=0.1, n_estimators=100, subsample=1.0, criterion='mse', min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_depth=3, min_impurity_decrease=0.0, min_impurity_split=None, random_state=None, max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None, warm_start=False, validation_fraction=0.1, n_iter_no_change=None, tol=0.0001, ccp_alpha=0.0):\r\n\t\tself.max_depth = max_depth\r\n\t\tself.tol = tol\r\n\t\tself.max_leaf_nodes = max_leaf_nodes\r\n\t\tself.n_iter_no_change = n_iter_no_change\r\n\t\tself.min_impurity_decrease = min_impurity_decrease\r\n\t\tself.alpha = alpha\r\n\t\tself.ccp_alpha = ccp_alpha\r\n\t\tself.n_estimators = n_estimators\r\n\t\tself.criterion = 
criterion\r\n\t\tself.subsample = subsample\r\n\t\tself.min_samples_leaf = min_samples_leaf\r\n\t\tself.loss = loss\r\n\t\tself.warm_start = warm_start\r\n\t\tself.learning_rate = learning_rate\r\n\t\tself.min_weight_fraction_leaf = min_weight_fraction_leaf\r\n\t\tself.min_impurity_split = min_impurity_split\r\n\t\tself.min_samples_split = min_samples_split\r\n\t\tself.random_state = random_state\r\n\t\tself.max_features = max_features\r\n\t\tself.validation_fraction = validation_fraction\r\n\t\tself.verbose = verbose\r\n\t\tself.model = GBR(learning_rate = self.learning_rate,\r\n\t\t\tmin_impurity_decrease = self.min_impurity_decrease,\r\n\t\t\tccp_alpha = self.ccp_alpha,\r\n\t\t\tsubsample = self.subsample,\r\n\t\t\tmin_weight_fraction_leaf = self.min_weight_fraction_leaf,\r\n\t\t\tvalidation_fraction = self.validation_fraction,\r\n\t\t\talpha = self.alpha,\r\n\t\t\tmax_leaf_nodes = self.max_leaf_nodes,\r\n\t\t\tmax_depth = self.max_depth,\r\n\t\t\tmin_samples_split = self.min_samples_split,\r\n\t\t\tn_iter_no_change = self.n_iter_no_change,\r\n\t\t\tn_estimators = self.n_estimators,\r\n\t\t\tloss = self.loss,\r\n\t\t\twarm_start = self.warm_start,\r\n\t\t\ttol = self.tol,\r\n\t\t\tverbose = self.verbose,\r\n\t\t\tmin_impurity_split = self.min_impurity_split,\r\n\t\t\tmin_samples_leaf = self.min_samples_leaf,\r\n\t\t\tmax_features = self.max_features,\r\n\t\t\tcriterion = self.criterion,\r\n\t\t\trandom_state = self.random_state)\r\n\r\n","sub_path":"Code/Implement and design a machine learning hierarchy wrapper for SimpleML/MLalgorithms/Regression/_GradientBoostingRegressor.py","file_name":"_GradientBoostingRegressor.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"122625633","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n\nimport datetime as dt\nfrom varsomdata import getvarsompickles as gvp\nfrom varsomdata import getobservations as go\nfrom utilities import makepickle as mp\nimport setenvironment as env\nimport os as os\nimport numpy as np\nimport pandas as pd\n\n__author__ = 'raek'\n\n\nclass _WarningPartInt:\n\n def __init__(self, for_this_author, for_all):\n\n self.values = [float(i) for i in for_this_author]\n self.values_all = [float(i) for i in for_all]\n\n self.avg_values = np.mean(np.array(self.values))\n self.avg_values_all = np.mean(np.array(self.values_all))\n\n self.std_values = np.std(np.array(self.values))\n self.std_values_all = np.std(np.array(self.values_all))\n\n\nclass _WarningPartString:\n\n def __init__(self, for_this_author, for_all):\n\n self.texts = for_this_author\n self.texts_all = for_all\n\n self.values = [len(str(t)) for t in self.texts]\n self.values_all = [len(str(t)) for t in self.texts_all]\n\n self.avg_values = np.mean(np.array(self.values))\n self.avg_values_all = np.mean(np.array(self.values_all))\n\n self.std_values = np.std(np.array(self.values))\n self.std_values_all = np.std(np.array(self.values_all))\n\n self.daily_text_diff = []\n self.daily_text_diff_all = []\n\n\nclass Forecaster:\n\n def __init__(self, author_in):\n\n self.author = author_in\n\n # About the warning production for current author\n self.warnings = [] # all warnings made by this forecaster\n self.warning_count = 0 # int number of warnings\n self.warning_publish_dates = {} # dict of {date: number of warnings}\n self.work_days = 0\n\n # About the warning content for current author\n self.danger_levels = [] # ints\n self.main_texts = [] # strings\n self.avalanche_dangers = [] 
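# --- Hedged aside (added; illustrative only, not part of the original record):\n        # the _WarningPartInt helper above just pairs one author's values with the full\n        # population and precomputes numpy mean/std for both, e.g.\n        #   part = _WarningPartInt([3, 3, 4], [1, 2, 3, 4, 5])\n        #   part.avg_values      # ~3.33 (this author)\n        #   part.avg_values_all  # 3.0 (all authors)\n        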
# strings\n self.snow_surfaces = [] # strings\n self.current_weak_layers = [] # strings\n self.problems_pr_warnings = [] # ints\n\n # About observations for current author\n self.observer_id = None # int observer id in regObs\n self.observer_nick = None\n self.observations = []\n self.observation_count = 0\n self.avalanche_evaluation_count = 0\n self.avalanche_problem_count = 0\n self.snow_profile_count = 0\n\n def add_warning(self, warning_in):\n # add warning\n self.warnings.append(warning_in)\n self._update_warning_key_figures()\n\n def _update_warning_key_figures(self):\n # update numbers on the authors production\n self.warning_count += 1\n\n publish_date = self.warnings[-1].publish_time.date()\n if publish_date in self.warning_publish_dates.keys():\n self.warning_publish_dates[publish_date] += 1\n else:\n self.warning_publish_dates[publish_date] = 1\n\n self.work_days = len(self.warning_publish_dates.keys())\n\n def add_observation(self, observation_in):\n # add observations and update numbers for about authors observations\n self.observations.append(observation_in)\n self.observer_id = observation_in.ObserverID\n self.observer_nick = observation_in.NickName\n self.observation_count = len(self.observations)\n\n for f in observation_in.Observations:\n\n if isinstance(f, go.AvalancheEvaluation3):\n self.avalanche_evaluation_count += 1\n\n if isinstance(f, go.AvalancheEvalProblem2):\n self.avalanche_problem_count += 1\n\n if isinstance(f, go.SnowProfile):\n self.snow_profile_count += 1\n\n def add_danger_levels(self, danger_levels_in, danger_levels_all_in):\n self.danger_levels = _WarningPartInt(danger_levels_in, danger_levels_all_in)\n\n def add_main_texts(self, main_texts_in, main_texts_all_in):\n self.main_texts = _WarningPartString(main_texts_in, main_texts_all_in)\n\n def add_avalanche_dangers(self, avalanche_dangers_in, avalanche_dangers_all_in):\n self.avalanche_dangers = _WarningPartString(avalanche_dangers_in, avalanche_dangers_all_in)\n\n def add_snow_surfaces(self, snow_surface_in, snow_surface_all_in):\n self.snow_surfaces = _WarningPartString(snow_surface_in, snow_surface_all_in)\n\n def add_current_weak_layers(self, current_weak_layers_in, current_weak_layers_all_in):\n self.current_weak_layers = _WarningPartString(current_weak_layers_in, current_weak_layers_all_in)\n\n def add_problems_pr_warnings(self, problems_pr_warnings_in, problems_pr_warnings_all_in):\n self.problems_pr_warnings = _WarningPartInt(problems_pr_warnings_in, problems_pr_warnings_all_in)\n\n def to_dict(self):\n\n _dict = {'Author': self.author,\n 'WarningCount': self.warning_count,\n 'WorkDays': self.work_days,\n 'ObserverNick': self.observer_nick,\n 'ObserverID': self.observer_id,\n 'ObservationCount': self.observation_count,\n 'AvalancheEvaluationCount': self.avalanche_evaluation_count,\n 'AvalancheProblemCount': self.avalanche_problem_count,\n 'SnowProfileCount': self.snow_profile_count\n }\n\n return _dict\n\n\ndef make_forecaster_data(year):\n \"\"\"\n For one season, make the forecaster dictionary with all the necessary data.\n :param year: [string] Eg. 
season '2018-19'\n \"\"\"\n\n # The data\n all_warnings = gvp.get_all_forecasts(year, max_file_age=23)\n all_observation_forms = gvp.get_all_observations(year, geohazard_tids=10, max_file_age=23)\n\n forecaster_data = {}\n\n for w in all_warnings:\n if w.author in forecaster_data.keys():\n forecaster_data[w.author].add_warning(w)\n else:\n forecaster_data[w.author] = Forecaster(w.author)\n forecaster_data[w.author].add_warning(w)\n\n # number_by_author_sorted = sorted(number_by_author.items(), key=lambda kv: kv[1], reverse=True)\n\n for o in all_observation_forms:\n if o.NickName in forecaster_data.keys():\n forecaster_data[o.NickName].add_observation(o)\n\n forecaster_list_of_dict = []\n for v in forecaster_data.values():\n forecaster_list_of_dict.append(v.to_dict())\n\n import csv\n with open('{0}forecaster_followup.txt'.format(env.output_folder), 'w', encoding='utf8') as f:\n dict_writer = csv.DictWriter(f, delimiter=';', fieldnames=forecaster_list_of_dict[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(forecaster_list_of_dict)\n\n return\n\n\ndef make_forecaster_data_old(year):\n \"\"\"For one season, make the forecaster dictionary with all the necessary data.\n :param year: [string] Eg. season '2017-18'\n \"\"\"\n\n # get all valid forecasts\n all_warnings = gvp.get_all_forecasts(year, max_file_age=100)\n\n # get authors of all forecasters.\n authors = []\n for w in all_warnings:\n if w.author not in authors:\n authors.append(w.author)\n\n # Make data set with dict {author: Forecaster}. Add warnings to Forecaster object.\n # Note: A list of all authors are all the keys in this dictionary.\n forecaster_dict = {}\n for w in all_warnings:\n if w.author not in forecaster_dict:\n forecaster_dict[w.author] = Forecaster(w.author)\n forecaster_dict[w.author].add_warning(w)\n else:\n forecaster_dict[w.author].add_warning(w)\n\n # need this below for forecaster statistics\n danger_levels_all = [] # ints\n main_texts_all = [] # strings\n avalanche_dangers_all = [] # strings\n snow_surfaces_all = [] # strings\n current_weak_layers_all = [] # strings\n problems_pr_warnings_all = [] # ints\n for w in all_warnings:\n danger_levels_all.append(w.danger_level)\n main_texts_all.append(w.main_text)\n avalanche_dangers_all.append(w.avalanche_danger)\n snow_surfaces_all.append(w.snow_surface)\n current_weak_layers_all.append(w.current_weak_layers)\n problems_pr_warnings_all.append(len(w.avalanche_problems))\n\n # Add data about the authors forecasts to forecaster objects in the dict\n for n, f in forecaster_dict.items():\n\n # add numbers of warnings made\n forecaster_dict[f.author].add_warnings_count(len(f.warnings))\n\n # find how many pr date valid\n dates_valid = {}\n for w in f.warnings:\n if w.date_valid not in dates_valid:\n dates_valid[w.date_valid] = 1\n else:\n dates_valid[w.date_valid] += 1\n forecaster_dict[f.author].add_dates_valid(dates_valid)\n\n # add data on the danger levels forecasted\n danger_levels_author = []\n for w in f.warnings:\n data = {'Date': w.date_valid,\n 'Region': w.region_name,\n 'DL': w.danger_level,\n 'Danger level': w.danger_level}\n forecaster_dict[f.author].add_danger_levels(danger_levels_author, danger_levels_all)\n\n # add data on the main texts made\n main_texts_author = [w.main_text for w in f.warnings]\n forecaster_dict[f.author].add_main_texts(main_texts_author, main_texts_all)\n\n # add data on the avalanche dangers made\n avalanche_dangers_author = [w.avalanche_danger for w in f.warnings]\n 
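# --- Hedged aside (added; illustrative only, not part of the original record):\n        # the author -> warnings accumulation in this module is the usual group-by-key\n        # pattern; collections.defaultdict would drop the explicit membership test, e.g.\n        #   from collections import defaultdict\n        #   by_author = defaultdict(list)\n        #   for w in all_warnings:\n        #       by_author[w.author].append(w)\n        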
forecaster_dict[f.author].add_avalanche_dangers(avalanche_dangers_author, avalanche_dangers_all)\n\n # add data on the snow surfaces forecasted\n snow_surfaces_author = [w.snow_surface for w in f.warnings]\n forecaster_dict[f.author].add_snow_surfaces(snow_surfaces_author, snow_surfaces_all)\n\n # add data on the current weak layers made\n current_weak_layers_author = [w.current_weak_layers for w in f.warnings]\n forecaster_dict[f.author].add_current_weak_layers(current_weak_layers_author, current_weak_layers_all)\n\n # add data on the avalanche problems made\n problems_pr_warnings_author = [len(w.avalanche_problems) for w in f.warnings]\n forecaster_dict[f.author].add_problems_pr_warnings(problems_pr_warnings_author, problems_pr_warnings_all)\n\n return forecaster_dict\n\n\ndef make_plots(forecaster_dict, nick, path=''):\n\n import matplotlib.pyplot as plt\n import numpy\n import datetime as dt\n\n f = forecaster_dict[nick]\n\n plt.clf()\n bins = numpy.linspace(0, 1024, 20)\n plt.hist(f.nowcast_lengths_all, bins, alpha=0.5, color='k', label='Alle varsel')\n plt.axvline(f.nowcast_lengths_all_avg, color='k', linestyle='dashed', linewidth=3, label='Snitt alle')\n plt.hist(f.nowcast_lengths, bins, alpha=0.5, color='b', label='{0}'.format(nick))\n plt.axvline(f.nowcast_lengths_avg, color='b', linestyle='dashed', linewidth=3, label='Snitt {0}'.format(nick))\n plt.title(\"Tegn brukt paa naasituasjonen\")\n plt.xlabel(\"Antall\")\n plt.ylabel(\"Frekvens\")\n plt.legend(loc='upper left')\n plt.savefig('{0}{1}_nowcast.png'.format(path, f.observer_id))\n\n\n plt.clf()\n bins = numpy.linspace(0, 1024, 20)\n plt.hist(f.forecast_lengths_all, bins, alpha=0.5, color='k', label='Alle varsel')\n plt.axvline(f.forecast_lengths_all_avg, color='k', linestyle='dashed', linewidth=3, label='Snitt alle')\n plt.hist(f.forecast_lengths, bins, alpha=0.5, color='pink', label='{0}'.format(nick))\n plt.axvline(f.forecast_lengths_avg, color='pink', linestyle='dashed', linewidth=3, label='Snitt {0}'.format(nick))\n plt.title(\"Tegn brukt paa varsel\")\n plt.xlabel(\"Antall\")\n plt.ylabel(\"Frekvens\")\n plt.legend(loc='upper right')\n plt.savefig('{0}{1}_forecast.png'.format(path, f.observer_id))\n\n\n plt.clf()\n bins = numpy.linspace(0, 5, 6)\n plt.hist(f.danger_levels_all, bins, align='left', rwidth=0.5, alpha=0.5, color='k', label='Alle varsel')\n plt.axvline(f.danger_levels_all_avg, color='k', linestyle='dashed', linewidth=3, label='Snitt alle')\n plt.hist(f.danger_levels, bins, align='left', rwidth=0.5, color='pink', label='{0}'.format(nick))\n plt.axvline(f.danger_levels_avg, color='pink', linestyle='dashed', linewidth=3, label='Snitt {0}'.format(nick))\n plt.title(\"Fordeling paa faregrader\")\n plt.xlabel(\"Faregrad\")\n plt.ylabel(\"Frekvens\")\n plt.legend(loc='upper right')\n plt.savefig('{0}{1}_danger.png'.format(path, f.observer_id))\n\n\n plt.clf()\n bins = numpy.linspace(0, 4, 5)\n plt.hist(f.problems_pr_warning_all, bins, align='left', rwidth=0.5, alpha=0.5, color='k', label='Alle varsel')\n plt.axvline(f.problems_pr_warning_all_avg, color='k', linestyle='dashed', linewidth=3, label='Snitt alle')\n plt.hist(f.problems_pr_warning, bins, align='left', rwidth=0.5, color='pink', label='{0}'.format(nick))\n plt.axvline(f.problems_pr_warning_avg, color='pink', linestyle='dashed', linewidth=3, label='Snitt alle')\n plt.title(\"Skredproblemer pr varsel\")\n plt.xlabel(\"Antall\")\n plt.ylabel(\"Frequency\")\n plt.legend(loc='upper right')\n plt.savefig('{0}{1}_problems_pr_warning.png'.format(path, 
f.observer_id))\n\n\n plt.clf()\n antall_pr_dag = f.dates.values()\n if len(antall_pr_dag) != 0:\n snitt = sum(antall_pr_dag)/float(len(antall_pr_dag))\n plt.plot((dt.date(2016, 12, 1), dt.date(2017, 6, 1)), (snitt, snitt), color='g', linestyle='dashed', linewidth=3)\n # barplot needs datetimes not dates\n dates = [dt.datetime.combine(d, dt.datetime.min.time()) for d in f.dates.keys()]\n plt.bar(dates, antall_pr_dag, color='g')\n ymax = max(f.dates.values()) + 1\n plt.title(\"Antall varsler paa datoer\")\n plt.ylim(0, ymax)\n plt.xlim(dt.date(2016, 12, 1), dt.date(2017, 6, 1))\n plt.xticks( rotation=17 )\n plt.savefig('{0}{1}_dates.png'.format(path, f.observer_id))\n\n return\n\n\ndef make_html(forecaster_dict, nick, path='', type='Simple'):\n \"\"\"Makes a html with dates and links to the forecasts made by a given forecaster.\n\n :param forecaster_dict:\n :param nick:\n :param observer_id:\n :param path:\n :return:\n \"\"\"\n\n fc = forecaster_dict[nick]\n\n if 'Simple' in type:\n\n html_file_name = '{0}{1}_forecasts_simple.html'.format(path, fc.observer_id)\n f = open(html_file_name, 'w', encoding='utf-8')\n\n f.write('\\n\\n\\n')\n d_one_time = w.date\n f.write(' \\n'.format(d_one_time))\n f.write('
    ')\n d_one_time = None\n for w in fc.warnings:\n region_name_varsom = w.region_name.replace('aa','a').replace('oe', 'o').replace('ae', 'a')\n if d_one_time != w.date:\n f.write('
    {0}{0}'.format(w.region_name, region_name_varsom, w.date.strftime('%d.%m.%Y')))\n else:\n f.write(', {0}'.format(w.region_name, region_name_varsom, w.date.strftime('%d.%m.%Y')))\n f.write('
    ')\n\n elif 'Advanced' in type:\n\n html_file_name = '{0}{1}_forecasts_advanced.html'.format(path, fc.observer_id)\n f = open(html_file_name, 'w', encoding='utf-8')\n\n f.write('\\n'\n '
    \\n')\n f.write('
    \\n')\n f.write('')\n\n d_one_time = forecaster_dict[nick].warnings[0]\n for w in fc.warnings:\n region_name_varsom = w.region_name.replace('aa','a').replace('oe', 'o').replace('ae', 'a')\n problem_highlights = ''\n for p in w.avalanche_problems:\n if not hasattr(p, 'aval_size'):\n p.aval_size = 'Ikke gitt'\n problem_highlights += '
    P{6}[{5}, {0}, {1}, {2}, {3}, {4}]'.format(p.cause_name, p.aval_size, p.aval_probability, p.aval_distribution, p.aval_trigger, p.aval_type, p.order+1 )\n forecast_highlights = 'Faregrad {0}'.format(w.danger_level) + problem_highlights\n f.write('\\n')\n if d_one_time != w.date:\n d_one_time = w.date\n f.write(' \\n'.format(d_one_time))\n else:\n f.write(' \\n')\n f.write(' '.format(w.region_name, region_name_varsom, w.date.strftime('%d.%m.%Y')))\n f.write(' '.format(forecast_highlights))\n f.write('\\n')\n f.write('
    {0}{0}{0}
    ')\n\n\n else:\n html_file_name = '{0}{1}_forecasts.html'.format(path, fc.observer_id)\n f = open(html_file_name, 'w', encoding='utf-8')\n f.write('
    Was not asked to create any data..')\n f.write('
    ')\n\n f.close()\n\n return\n\n\ndef make_m3_figs(forecaster_dict, nick, path=''):\n \"\"\"Makes m3 tables for each forecaster. Uses methods from the runmatrix module.\n\n :param forecaster_dict:\n :param nick: is how I can select relevant warnings for this forecaster\n :param product_folder: location where plots (endproduct) is saved\n :param project_folder: many files generated; make project folder in product folder\n :return:\n \"\"\"\n\n from varsomscripts import matrix as mx\n\n f = forecaster_dict[nick]\n # select only warnings for this forecaster\n one_forecaster_warnings = f.warnings\n\n # prepare dataset\n pickle_data_set_file_name = '{0}runforefollow data set {1}.pickle'.format(env.local_storage, f.observer_id)\n mx.pickle_data_set(one_forecaster_warnings, pickle_data_set_file_name, use_ikke_gitt=False)\n forecaster_data_set = mp.unpickle_anything(pickle_data_set_file_name)\n\n # prepare the m3 elementes (cell contents)\n pickle_m3_v2_file_name = '{0}runforefollow m3 {1}.pickle'.format(env.local_storage, f.observer_id)\n mx.pickle_M3(forecaster_data_set, 'matrixconfiguration.v2.csv', pickle_m3_v2_file_name)\n m3_v2_elements = mp.unpickle_anything(pickle_m3_v2_file_name)\n\n # plot\n plot_m3_v2_file_name = '{0}{1}_m3'.format(path, f.observer_id)\n mx.plot_m3_v2(m3_v2_elements, plot_m3_v2_file_name)\n\n return\n\n\ndef make_comparison_plots(forecaster_dict, path=''):\n\n import matplotlib.pyplot as plt; plt.rcdefaults()\n import numpy as np\n\n forecasters = [n for n,f in forecaster_dict.items()]\n y_pos = np.arange(len(forecasters))\n\n all_danger_levels = [f.danger_levels_avg for n,f in forecaster_dict.items()]\n all_danger_levels_std = [f.danger_levels_std for n,f in forecaster_dict.items()]\n\n all_problems_pr_warning = [f.problems_pr_warning_avg for n,f in forecaster_dict.items()]\n all_problems_pr_warning_std = [f.problems_pr_warning_std for n,f in forecaster_dict.items()]\n\n all_nowcast_lengths = [f.nowcast_lengths_avg for n,f in forecaster_dict.items()]\n all_nowcast_lengths_std = [f.nowcast_lengths_std for n,f in forecaster_dict.items()]\n\n all_forecast_lengths = [f.forecast_lengths_avg for n,f in forecaster_dict.items()]\n all_forecast_lengths_std = [f.forecast_lengths_std for n,f in forecaster_dict.items()]\n\n # all danger levels\n plt.clf()\n plt.figure(figsize=(12, 8))\n plt.barh(y_pos, all_danger_levels, align='center', alpha=0.5, color='pink',\n xerr=all_danger_levels_std, error_kw=dict(ecolor='k', lw=0.25, capsize=1.5, capthick=1.5))\n plt.yticks(y_pos, forecasters, rotation=0)\n plt.xlabel('Gjennomsnittlig faregrad')\n plt.xlim(0., 4.5)\n plt.axvline(forecaster_dict['Ragnar@NVE'].danger_levels_all_avg, color='k', linestyle='dashed', linewidth=3)\n plt.title('Sammenlignet varselt faregrad sesongen 2016/17')\n fig = plt.gcf()\n fig.subplots_adjust(left=0.2)\n plt.savefig('{0}all_danger_201617.png'.format(path))\n\n # all problems pr warning\n plt.clf()\n plt.figure(figsize=(12, 8))\n plt.barh(y_pos, all_problems_pr_warning, align='center', alpha=0.5, color='pink',\n xerr=all_problems_pr_warning_std, error_kw=dict(ecolor='k', lw=0.25, capsize=1.5, capthick=1.5))\n plt.yticks(y_pos, forecasters, rotation=0)\n plt.xlabel('Snitt antall skredproblemer')\n plt.xlim(0.5, 3.)\n plt.axvline(forecaster_dict['Ragnar@NVE'].problems_pr_warning_all_avg, color='k', linestyle='dashed', linewidth=3)\n plt.title('Antall skredproblemer pr varsel sesongen 2016/17')\n fig = plt.gcf()\n fig.subplots_adjust(left=0.2)\n 
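# --- Hedged aside (added; illustrative only, not part of the original record):\n    # every chart in this function repeats one matplotlib recipe -- horizontal bars\n    # with std error bars plus a dashed reference line for the all-author mean, e.g.\n    #   y = np.arange(3)\n    #   plt.barh(y, [2.1, 2.6, 2.4], xerr=[0.4, 0.3, 0.5], align='center', alpha=0.5)\n    #   plt.axvline(2.4, color='k', linestyle='dashed', linewidth=3)\n    #   plt.savefig('comparison_example.png')\n    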
plt.savefig('{0}all_problems_pr_warning_201617.png'.format(path))\n\n # all nowcast lengths\n plt.clf()\n plt.figure(figsize=(12, 8))\n plt.barh(y_pos, all_nowcast_lengths, align='center', alpha=0.5, color='b',\n xerr=all_nowcast_lengths_std, error_kw=dict(ecolor='k', lw=0.25, capsize=1.5, capthick=1.5))\n plt.yticks(y_pos, forecasters, rotation=0)\n plt.xlabel('Snitt tegn paa naasituasjon')\n plt.xlim(0., 1024.)\n plt.axvline(forecaster_dict['Ragnar@NVE'].nowcast_lengths_all_avg, color='k', linestyle='dashed', linewidth=3)\n plt.title('Antall tegn brukt i naasituasjonen sesongen 2016/17')\n fig = plt.gcf()\n fig.subplots_adjust(left=0.2)\n plt.savefig('{0}all_nowcast_lengths_201617.png'.format(path))\n\n # all forecast lengths\n plt.clf()\n plt.figure(figsize=(12, 8))\n plt.barh(y_pos, all_forecast_lengths, align='center', alpha=0.5, color='b',\n xerr=all_forecast_lengths_std, error_kw=dict(ecolor='k', lw=0.25, capsize=1.5, capthick=1.5))\n plt.yticks(y_pos, forecasters, rotation=0)\n plt.xlabel('Snitt tegn paa varselet')\n plt.xlim(0., 1024.)\n plt.axvline(forecaster_dict['Ragnar@NVE'].forecast_lengths_all_avg, color='k', linestyle='dashed', linewidth=3)\n plt.title('Antall tegn brukt i varselet sesongen 2016/17')\n fig = plt.gcf()\n fig.subplots_adjust(left=0.2)\n plt.savefig('{0}all_forecast_lengths_201617.png'.format(path))\n\n return\n\n\nif __name__ == \"__main__\":\n\n make_forecaster_data('2018-19')\n\n pass","sub_path":"varsomscripts/forecasterfollowup.py","file_name":"forecasterfollowup.py","file_ext":"py","file_size_in_byte":22182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"26137778","text":"import requests\nimport time\nimport execjs\n\n\n# 接口构造\n\n# 构造一个url\ndef getUrl(fscode):\n head = 'http://fund.eastmoney.com/pingzhongdata/'\n tail = '.js?v='+ time.strftime(\"%Y%m%d%H%M%S\",time.localtime())\n return head+fscode+tail\n\n\n# 获取净值\ndef getWorth(fscode):\n #用requests获取到对应的文件\n content = requests.get(getUrl(fscode))\n #使用execjs获取到相应的数据\n jsContent = execjs.compile(content.text)\n name = jsContent.eval('fS_name')\n code = jsContent.eval('fS_code')\n #单位净值走势\n netWorthTrend = jsContent.eval('Data_netWorthTrend')\n #累计净值走势\n ACWorthTrend = jsContent.eval('Data_ACWorthTrend')\n netWorth = []\n ACWorth = []\n #提取出里面的净值\n for dayWorth in netWorthTrend[::-1]:\n netWorth.append(dayWorth['y'])\n for dayACWorth in ACWorthTrend[::-1]:\n ACWorth.append(dayACWorth[1])\n print(name,code)\n return netWorth, ACWorth\n\n","sub_path":"技术学习/爬虫.py","file_name":"爬虫.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"394346795","text":"from peewee import *\nfrom database import SQLiteModel\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef populate():\n Serial.insert_many(remote_list()).execute()\n\ndef remote_list():\n sent = []\n r = requests.get('http://www.lostfilm.tv/serials.php')\n page = BeautifulSoup(r.text)\n for link in page.find_all('a', class_='bb_a'):\n ru = link.contents[0]\n serial_id = link['href'][16:]\n en = link.find('span').text[1:-1]\n if sent.count(serial_id) == 0:\n sent.append(serial_id)\n yield {\n 'serial_id': link['href'][16:],\n 'en_name': link.find('span').text[1:-1],\n 'ru_name': link.contents[0]\n }\n\ndef find_by_name(name):\n name = '%%%s%%' % (name, )\n query = Serial.select().where(Serial.en_name ** name\\\n | Serial.ru_name ** name)\n for i in query:\n yield i\n\nclass Serial(SQLiteModel):\n serial_id = 
IntegerField(primary_key=True)\n en_name = CharField()\n ru_name = DateField()\n\n def __repr__(self):\n return \"\" % (self.serial_id, self.en_name)\n","sub_path":"lostfilm/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"349041667","text":"import asyncio\r\nimport sys\r\nimport configparser\r\nimport json\r\nfrom telethon import TelegramClient\r\nfrom telethon.tl.functions.channels import GetParticipantsRequest\r\nfrom telethon.tl.functions.users import GetFullUserRequest\r\nfrom quart import Quart, jsonify, request\r\nfrom quart_cors import cors, route_cors\r\n\r\n# set app\r\napp = Quart(__name__)\r\napp = cors(app)\r\n\r\n# set route\r\n@app.route(\"/getUserID\", methods=[\"POST\"])\r\n@route_cors()\r\nasync def index():\r\n test_client = app.test_client()\r\n app.config[\"QUART_CORS_EXPOSE_HEADERS\"] = [\"X-Special\", \"X-Other\"]\r\n response = await test_client.get(\"/\", headers={\"Origin\": \"http://localhost\"})\r\n assert response.access_control.allow_origin == {\"*\"}\r\n assert response.access_control.expose_headers == {\"X-Special\", \"X-Other\"}\r\n\r\n # get the json variables from the request\r\n jsonObjectIn = await request.get_json()\r\n\r\n #map variables from request\r\n tipToUserName = jsonObjectIn['tipToUserName']\r\n tipFromID = jsonObjectIn['tipFromID']\r\n telegramGroupID = jsonObjectIn['telegramGroupID']\r\n tipFromUserName = jsonObjectIn['tipFromUserName']\r\n tipAmount = jsonObjectIn['tipAmount']\r\n tipToken = jsonObjectIn['tipToken']\r\n memo = jsonObjectIn['memo']\r\n tokenID = jsonObjectIn['tokenID']\r\n\r\n # load in the config for Telegram API\r\n config = configparser.ConfigParser()\r\n config.read('settings.ini')\r\n api_id = int(config['Telegram_Credentials']['api_id'])\r\n api_hash = config['Telegram_Credentials']['api_hash']\r\n phone_number = config['Telegram_Credentials']['phone_number']\r\n\r\n ## set client as global var\r\n global client\r\n\r\n ## if the client is already defined don't reload\r\n try:\r\n client\r\n except NameError:\r\n client = await TelegramClient('session_name', api_id, api_hash).start()\r\n else:\r\n print(\"client is defined\")\r\n\r\n jsonOut = {\r\n \"tipToUserName\": str(tipToUserName),\r\n \"tipFromID\": str(tipFromID),\r\n \"telegramGroupID\": str(telegramGroupID),\r\n \"tipFromUserName\": str(tipFromUserName),\r\n \"tipAmount\": str(tipAmount),\r\n \"tipToken\": str(tipToken),\r\n \"memo\": str(memo),\r\n \"tokenID\": str(tokenID)\r\n }\r\n\r\n try:\r\n full = await client(GetFullUserRequest(str(tipToUserName)))\r\n userID = full.user.id\r\n jsonOut.update({\"userID\": str(userID)})\r\n except:\r\n userID = None\r\n \r\n return json.dumps(jsonOut)\r\n\r\napp.run(port=5000, host='0.0.0.0')\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"57971296","text":"import urllib.request as rq\nimport urllib\nimport datetime\nimport json\nfrom itertools import count\nimport pymysql\n\ndef get_url_request(url):\n req = rq.Request(url)\n\n try:\n response = rq.urlopen(req)\n if response.getcode() == 200:\n print(\"Request Success: %s , %s\" % (url,datetime.datetime.now()))\n return response.read().decode('utf-8')\n else:\n print(\"Request Failure: %s , %s\" % (url,datetime.datetime.now()))\n return None\n\n except Exception as e:\n print(e)\n return 
None\n\ndef get_hospital_div(lat, lon, hpid):\n base = \"http://apis.data.go.kr/B552657/HsptlAsembySearchService/getHsptlMdcncLcinfoInqire\"\n access_key = \"?_type=json&serviceKey=\" +\"ZUWGoqtxSHbAZ2uscevjEZEQOWzRJiJhjlFdwQbfsWHl3hnHeYmVG1BFrnxmK%2FCVga%2F%2FE5MEaobdQu437QBsRw%3D%3D\"\n parameters = \"&WGS84_LAT=%s&WGS84_LON=%s\" % (lat, lon)\n\n url = base + access_key + parameters\n\n divData = get_url_request(url)\n\n if divData == None:\n print('none')\n return ''\n\n try:\n divData = json.loads(divData)\n except Exception as e:\n print(e)\n return ''\n\n for item in divData[\"response\"][\"body\"][\"items\"][\"item\"]:\n if item[\"hpid\"] == hpid:\n return(item[\"dutyDivName\"])\n\n return ''\n\ndef save_date_mysql(jsonResult):\n conn = pymysql.connect(host='127.0.0.1', user='zerosw', password='123456', db='meindb')\n cursor = conn.cursor()\n\n sql = 'insert into infos_maininfo values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n sql2 = 'insert into infos_sub1info(mondayStart, mondayEnd, tuesdayStart, tuesdayEnd,' \\\n ' wednesdayStart, wednesdayEnd, thursdayStart, thursdayEnd, fridayStart, fridayEnd, ' \\\n 'saturdayStart, saturdayEnd, sundayStart, sundayEnd, holidayStart, holidayEnd, hpid_id)' \\\n ' values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n\n for idx, item in enumerate(jsonResult):\n data = (item['hpid'], item['name'], item['div'], item['subject'], item['tel'], item['etel'],\n item['info'], item['lat'], item['lon'], item['addr'], item['emergency'], item['limbs'],\n item['pregnent'], item['newborn'], item['burn'], item['dialysis'])\n print('data:', data)\n\n data2 = (item['mondayStart'], item['mondayEnd'], item['tuesdayStart'], item['tuesdayEnd'],\n item['wednesdayStart'], item['wednesdayEnd'], item['thursdayStart'], item['thursdayEnd'],\n item['fridayStart'], item['fridayEnd'], item['saturdayStart'], item['saturdayEnd'],\n item['sundayStart'], item['sundayEnd'], item['holidayStart'], item['holidayEnd'], item['hpid'])\n print('data2:', data2)\n\n cursor.execute(sql, data)\n cursor.execute(sql2, data2)\n\n conn.commit()\n cursor.close()\n conn.close()\n print(\"database save\")\n\ndef get_hospital_list(q0, q1, i):\n base = \"http://apis.data.go.kr/B552657/HsptlAsembySearchService/getHsptlMdcncListInfoInqire\"\n access_key = \"?_type=json&serviceKey=\" +\"ZUWGoqtxSHbAZ2uscevjEZEQOWzRJiJhjlFdwQbfsWHl3hnHeYmVG1BFrnxmK%2FCVga%2F%2FE5MEaobdQu437QBsRw%3D%3D\"\n parameters = \"&Q0=%s&Q1=%s&pageNo=%d\" %(urllib.parse.quote(q0), urllib.parse.quote(q1), i)\n\n url = base + access_key + parameters\n print(\"url:\", url)\n\n rData = get_url_request(url)\n\n if rData == None:\n return None\n\n hpid_list = []\n\n rData = json.loads(rData)\n\n for item in rData[\"response\"][\"body\"][\"items\"][\"item\"]:\n hpid_list.append(item['hpid'])\n\n return hpid_list\n\n\ndef get_url_data(hp):\n base = \"http://apis.data.go.kr/B552657/HsptlAsembySearchService/getHsptlBassInfoInqire\"\n access_key = \"?_type=json&serviceKey=\" +\"ZUWGoqtxSHbAZ2uscevjEZEQOWzRJiJhjlFdwQbfsWHl3hnHeYmVG1BFrnxmK%2FCVga%2F%2FE5MEaobdQu437QBsRw%3D%3D\"\n pageNo = \"&HPID=%s\" % hp\n\n url = base + access_key + pageNo\n print(\"url:\", url)\n\n rData = get_url_request(url)\n\n if rData == None:\n print(None)\n return None\n\n try:\n rData = json.loads(rData)\n except Exception as e:\n print(e)\n return None\n\n jsonResult = []\n print(rData['response']['body']['items']['item'][\"hpid\"])\n\n item = rData['response']['body']['items']['item']\n hpid = '' if \"hpid\" not in 
item.keys() else item[\"hpid\"]\n lat = 0 if \"wgs84Lat\" not in item.keys() else item[\"wgs84Lat\"]\n lon = 0 if \"wgs84Lon\" not in item.keys() else item[\"wgs84Lon\"]\n if lat == 0 and lon == 0:\n div = \"\"\n else:\n div = get_hospital_div(lat, lon, hpid)\n name = '' if 'dutyName' not in item.keys() else item['dutyName']\n subject = '' if \"dgidIdName\" not in item.keys() else item[\"dgidIdName\"]\n tel = '' if \"dutyTel1\" not in item.keys() else item[\"dutyTel1\"]\n etel = '' if\"dutyTel3\" not in item.keys() else item[\"dutyTel3\"]\n info = '' if \"dutyInf\" not in item.keys() else item[\"dutyInf\"]\n mondayStart = '' if \"dutyTime1s\" not in item.keys() else str(item[\"dutyTime1s\"])[:2] + \":\" + str(item[\"dutyTime1s\"])[2:]\n mondayEnd = '' if \"dutyTime1c\" not in item.keys() else str(item[\"dutyTime1c\"])[:2] + \":\" + str(item[\"dutyTime1c\"])[2:]\n tuesdayStart = '' if \"dutyTime2s\" not in item.keys() else str(item[\"dutyTime2s\"])[:2] + \":\" + str(item[\"dutyTime2s\"])[2:]\n tuesdayEnd = '' if \"dutyTime2c\" not in item.keys() else str(item[\"dutyTime2c\"])[:2] + \":\" + str(item[\"dutyTime2c\"])[2:]\n wednesdayStart = '' if \"dutyTime3s\" not in item.keys() else str(item[\"dutyTime3s\"])[:2] + \":\" + str(item[\"dutyTime3s\"])[2:]\n wednesdayEnd = '' if \"dutyTime3c\" not in item.keys() else str(item[\"dutyTime3c\"])[:2] + \":\" + str(item[\"dutyTime3c\"])[2:]\n thursdayStart = '' if \"dutyTime4s\" not in item.keys() else str(item[\"dutyTime4s\"])[:2] + \":\" + str(item[\"dutyTime4s\"])[2:]\n thursdayEnd = '' if \"dutyTime4c\" not in item.keys() else str(item[\"dutyTime4c\"])[:2] + \":\" + str(item[\"dutyTime4c\"])[2:]\n fridayStart = '' if \"dutyTime5s\" not in item.keys() else str(item[\"dutyTime5s\"])[:2] + \":\" + str(item[\"dutyTime5s\"])[2:]\n fridayEnd = '' if \"dutyTime5c\" not in item.keys() else str(item[\"dutyTime5c\"])[:2] + \":\" + str(item[\"dutyTime5c\"])[2:]\n saturdayStart = '' if \"dutyTime6s\" not in item.keys() else str(item[\"dutyTime6s\"])[:2] + \":\" + str(item[\"dutyTime6s\"])[2:]\n saturdayEnd = '' if \"dutyTime6c\" not in item.keys() else str(item[\"dutyTime6c\"])[:2] + \":\" + str(item[\"dutyTime6c\"])[2:]\n sundayStart = '' if \"dutyTime7s\" not in item.keys() else str(item[\"dutyTime7s\"])[:2] + \":\" + str(item[\"dutyTime7s\"])[2:]\n sundayEnd = '' if \"dutyTime7c\" not in item.keys() else str(item[\"dutyTime7c\"])[:2] + \":\" + str(item[\"dutyTime7c\"])[2:]\n holidayStart = '' if \"dutyTime8s\" not in item.keys() else str(item[\"dutyTime8s\"])[:2] + \":\" + str(item[\"dutyTime8s\"])[2:]\n holidayEnd = '' if \"dutyTime8c\" not in item.keys() else str(item[\"dutyTime8c\"])[:2] + \":\" + str(item[\"dutyTime8c\"])[2:]\n addr = '' if \"dutyAddr\" not in item.keys() else item[\"dutyAddr\"]\n emergency = '' if \"MKioskTy25\" not in item.keys() else item[\"MKioskTy25\"]\n limbs = '' if \"MKioskTy5\" not in item.keys() else item[\"MKioskTy5\"]\n pregnent = '' if \"MKioskTy8\" not in item.keys() else item[\"MKioskTy8\"]\n newborn = '' if \"MKioskTy10\" not in item.keys() else item[\"MKioskTy10\"]\n burn = '' if \"MKioskTy11\" not in item.keys() else item[\"MKioskTy11\"]\n dialysis = '' if \"MKioskTy7\" not in item.keys() else item[\"MKioskTy7\"]\n\n\n\n jsonResult.append({\"hpid\":hpid, \"name\":name, \"div\":div, \"subject\":subject, \"tel\":tel, \"etel\":etel, \"addr\":addr,\n \"lat\":lat, \"lon\":lon, \"info\":info, \"mondayStart\":mondayStart, \"mondayEnd\":mondayEnd,\n \"tuesdayStart\":tuesdayStart, \"tuesdayEnd\":tuesdayEnd, 
\"wednesdayStart\":wednesdayStart, \"wednesdayEnd\":wednesdayEnd,\n \"thursdayStart\":thursdayStart, \"thursdayEnd\":thursdayEnd, \"fridayStart\":fridayStart, \"fridayEnd\":fridayEnd,\n \"saturdayStart\":saturdayStart, \"saturdayEnd\":saturdayEnd, \"sundayStart\":sundayStart, \"sundayEnd\":sundayEnd,\n \"holidayStart\":holidayStart, \"holidayEnd\":holidayEnd, \"emergency\":emergency, \"limbs\":limbs,\n \"pregnent\":pregnent, \"newborn\":newborn, \"burn\":burn, \"dialysis\":dialysis\n })\n\n # save_date_mysql(jsonResult)\n\n # return 1\n try:\n with open('medical_info_용산구.json', 'a', encoding='utf-8') as f:\n reJson = json.dumps(jsonResult, indent=4, ensure_ascii=False)\n f.write(reJson)\n except Exception as e :\n print(e)\n print(\"Fail to create json file_%s\" % datetime.datetime.now())\n\n print(\"Success to create json file_%s\" % datetime.datetime.now())\n\nif __name__ == '__main__':\n f = open('medical_info_용산구.json', 'w', encoding='utf-8')\n f.close()\n\n for i in count(1):\n h_list = get_hospital_list('서울특별시', '용산구', i)\n if h_list == None:\n print(\"Break exit\")\n break\n\n for hp in h_list:\n get_url_data(hp)\n\n\n\n","sub_path":"Python/CrolingTw/public_data/medical_info.py","file_name":"medical_info.py","file_ext":"py","file_size_in_byte":9121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"59041645","text":"import io\nimport json\nimport os\nimport sys\nimport time\nimport pandas as pd\nimport argparse\nimport lxml.html\nimport requests\nfrom cred import get_API_key\nfrom lxml.cssselect import CSSSelector\nfrom pymongo import MongoClient\n\nYOUTUBE_VIDEO_URL = 'https://www.youtube.com/watch?v={youtube_id}'\nYOUTUBE_COMMENTS_AJAX_URL_OLD = 'https://www.youtube.com/comment_ajax'\nYOUTUBE_COMMENTS_AJAX_URL_NEW = 'https://www.youtube.com/comment_service_ajax'\n\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'\n\n\ndef find_value(html, key, num_chars=2, separator='\"'):\n pos_begin = html.find(key) + len(key) + num_chars\n pos_end = html.find(separator, pos_begin)\n return html[pos_begin: pos_end]\n\n\ndef ajax_request(session, url, params=None, data=None, headers=None, retries=5, sleep=20):\n for _ in range(retries):\n response = session.post(url, params=params, data=data, headers=headers)\n if response.status_code == 200:\n return response.json()\n if response.status_code in [403, 413]:\n return {}\n else:\n time.sleep(sleep)\n\n\ndef download_comments(youtube_id, sleep=.1):\n if r'\\\"isLiveContent\\\":true' in requests.get(YOUTUBE_VIDEO_URL.format(youtube_id=youtube_id)).text:\n print('Live stream detected! 
Not all comments may be downloaded.')\n return download_comments_new_api(youtube_id, sleep)\n return download_comments_old_api(youtube_id, sleep)\n\n\ndef download_comments_new_api(youtube_id, sleep=1):\n # Use the new youtube API to download some comments\n session = requests.Session()\n session.headers['User-Agent'] = USER_AGENT\n\n response = session.get(YOUTUBE_VIDEO_URL.format(youtube_id=youtube_id))\n html = response.text\n session_token = find_value(html, 'XSRF_TOKEN', 3)\n\n data = json.loads(find_value(html, 'window[\"ytInitialData\"] = ', 0, '\\n').rstrip(';'))\n for renderer in search_dict(data, 'itemSectionRenderer'):\n ncd = next(search_dict(renderer, 'nextContinuationData'), None)\n if ncd:\n break\n continuations = [(ncd['continuation'], ncd['clickTrackingParams'])]\n\n while continuations:\n continuation, itct = continuations.pop()\n response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL_NEW,\n params={'action_get_comments': 1,\n 'pbj': 1,\n 'ctoken': continuation,\n 'continuation': continuation,\n 'itct': itct},\n data={'session_token': session_token},\n headers={'X-YouTube-Client-Name': '1',\n 'X-YouTube-Client-Version': '2.20200207.03.01'})\n\n if not response:\n break\n if list(search_dict(response, 'externalErrorMessage')):\n raise RuntimeError('Error returned from server: ' + next(search_dict(response, 'externalErrorMessage')))\n\n # Ordering matters. The newest continuations should go first.\n continuations = [(ncd['continuation'], ncd['clickTrackingParams'])\n for ncd in search_dict(response, 'nextContinuationData')] + continuations\n\n for comment in search_dict(response, 'commentRenderer'):\n yield {'cid': comment['commentId'],\n 'text': ''.join([c['text'] for c in comment['contentText']['runs']]),\n 'votes': comment.get('voteCount', {}).get('simpleText', '0')\n }\n\n time.sleep(sleep)\n\n\ndef search_dict(partial, key):\n if isinstance(partial, dict):\n for k, v in partial.items():\n if k == key:\n yield v\n else:\n for o in search_dict(v, key):\n yield o\n elif isinstance(partial, list):\n for i in partial:\n for o in search_dict(i, key):\n yield o\n\n\ndef download_comments_old_api(youtube_id, sleep=1):\n # Use the old youtube API to download all comments (does not work for live streams)\n session = requests.Session()\n session.headers['User-Agent'] = USER_AGENT\n\n # Get Youtube page with initial comments\n response = session.get(YOUTUBE_VIDEO_URL.format(youtube_id=youtube_id))\n html = response.text\n\n reply_cids = extract_reply_cids(html)\n\n ret_cids = []\n for comment in extract_comments(html):\n ret_cids.append(comment['cid'])\n yield comment\n\n page_token = find_value(html, 'data-token')\n session_token = find_value(html, 'XSRF_TOKEN', 3)\n\n first_iteration = True\n\n # Get remaining comments (the same as pressing the 'Show more' button)\n while page_token:\n data = {'video_id': youtube_id,\n 'session_token': session_token}\n\n params = {'action_load_comments': 1,\n 'order_by_time': True,\n 'filter': youtube_id}\n\n if first_iteration:\n params['order_menu'] = True\n else:\n data['page_token'] = page_token\n\n response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL_OLD, params, data)\n if not response:\n break\n\n page_token, html = response.get('page_token', None), response['html_content']\n\n reply_cids += extract_reply_cids(html)\n for comment in extract_comments(html):\n if comment['cid'] not in ret_cids:\n ret_cids.append(comment['cid'])\n yield comment\n\n first_iteration = False\n time.sleep(sleep)\n\n # Get replies (the same as pressing 
the 'View all X replies' link)\n for cid in reply_cids:\n data = {'comment_id': cid,\n 'video_id': youtube_id,\n 'can_reply': 1,\n 'session_token': session_token}\n\n params = {'action_load_replies': 1,\n 'order_by_time': True,\n 'filter': youtube_id,\n 'tab': 'inbox'}\n\n response = ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL_OLD, params, data)\n if not response:\n break\n\n html = response['html_content']\n\n for comment in extract_comments(html):\n if comment['cid'] not in ret_cids:\n ret_cids.append(comment['cid'])\n yield comment\n time.sleep(sleep)\n\n\ndef extract_comments(html):\n\n tree = lxml.html.fromstring(html)\n item_sel = CSSSelector('.comment-item')\n text_sel = CSSSelector('.comment-text-content')\n vote_sel = CSSSelector('.like-count.off')\n\n for item in item_sel(tree):\n yield {'cid': item.get('data-cid'),\n 'text': text_sel(item)[0].text_content(),\n 'votes': vote_sel(item)[0].text_content() if len(vote_sel(item)) > 0 else 0,\n }\n\ndef extract_reply_cids(html):\n tree = lxml.html.fromstring(html)\n sel = CSSSelector('.comment-replies-header > .load-comments')\n return [i.get('data-cid') for i in sel(tree)]\n\n# def get_comments_by_video(video_id, limit=500):\n\n# try:\n# if not video_id:\n# raise ValueError('L\\'id de channel n\\'est pas valide. ')\n\n# client = MongoClient('localhost', 27017)\n# db = client.youtube\n# collection = db.comments\n\n# comments_in_db = collection.find({\"video_id\": video_id})\n# df = pd.DataFrame(list(comments_in_db))\n \n# if df.shape[0] >0:\n# return df\n# client.close()\n# else:\n# print('Téléchargement de commentaire de vidéo: ', video_id)\n \n# client = MongoClient('localhost', 27017)\n# db = client.youtube\n# collection = db.comments\n\n# count = 0\n\n# start_time = time.time()\n# for comment in download_comments(video_id):\n \n# comment_df = {'video_id':video_id,'comment_id':comment['cid'], 'text':comment['text'],'votes':comment['votes']}\n\n# if collection.count_documents({'comment_id': comment['cid']}) > 0:\n# pass\n# else:\n# collection.insert_one(comment_df)\n\n# count += 1\n\n# print('Downloaded %d comment(s)\\r' % count)\n# if limit and count >= limit:\n# break\n \n# print('\\n[{:.2f} seconds] Done!'.format(time.time() - start_time))\n\n# comments_in_db = collection.find({\"video_id\": video_id})\n# comments_data = pd.DataFrame(list(comments_in_db)) \n# client.close()\n# return comments_data\n\n# except Exception as e:\n# print('Error:', str(e))\n# sys.exit(1)\n\ndef get_comments_by_video(video_id, limit, API_Key):\n '''\n This function can get comments by videos, if limit is smaller ou equal to 500 it use Youtu APi to get comments, else we use ajax to scrap comments\n '''\n try:\n if not video_id:\n raise ValueError('L\\'id de channel n\\'est pas valide. 
')\n\n client = MongoClient('localhost', 27017)\n db = client.youtube\n collection = db.comments\n\n comments_in_db = collection.find({\"video_id\": video_id})\n df = pd.DataFrame(list(comments_in_db))\n \n if df.shape[0] >0:\n return df\n client.close()\n else:\n client = MongoClient('localhost', 27017)\n db = client.youtube\n collection = db.comments\n\n if limit <= 500:\n # request = \"https://www.googleapis.com/youtube/v3/commentThreads?key={}&textFormat=plainText&part=id,snippet&videoId={}&maxResults=50\".format(API_Key, video_id)\n # response = requests.get(request)\n # json = response.json()\n # comments = json['items']\n\n # try:\n # nextPageToken = json['nextPageToken']\n # except:\n # nexPageToken = None\n\n # while nextPageToken is not None and len(comments)< limit:\n # request = \"https://www.googleapis.com/youtube/v3/commentThreads?key={}&textFormat=plainText&part=id,snippet&videoId={}&maxResults=50&nextPageToken={}\".format(API_Key,video_id, nextPageToken)\n # response = requests.get(request)\n # json = response.json()\n # comments += json['items']\n # try:\n # nextPageToken = json['nextPageToken']\n # except:\n # nexPageToken = None\n comments = []\n next_page_token = None\n\n while 1:\n res = \"https://www.googleapis.com/youtube/v3/commentThreads?key={}&textFormat=plainText&part=id,snippet&videoId={}&maxResults=50&nextPageToken={}\".format(API_Key,video_id, next_page_token)\n\n comments += res['items']\n next_page_token = res.get('nextPageToken')\n\n if next_page_token is None:\n break \n \n for comment in comments:\n comment_id = comment['id']\n comment_vote = comment['snippet']['topLevelComment']['snippet']['textOriginal']\n comment_text = comment['snippet']['topLevelComment']['snippet'][ 'likeCount']\n comment_df = {'video_id':video_id,'comment_id':comment_id, 'text':comment_vote,'votes':comment_text}\n collection.insert_one(comment_df)\n \n else: \n print('Téléchargement de commentaire de vidéo: ', video_id)\n \n client = MongoClient('localhost', 27017)\n db = client.youtube\n collection = db.comments\n\n count = 0\n\n start_time = time.time()\n for comment in download_comments(video_id):\n \n comment_df = {'video_id':video_id,'comment_id':comment['cid'], 'text':comment['text'],'votes':comment['votes']}\n\n if collection.count_documents({'comment_id': comment['cid']}) > 0:\n pass\n else:\n collection.insert_one(comment_df)\n\n count += 1\n\n print('Downloaded %d comment(s)\\r' % count)\n if limit and count >= limit:\n break\n \n print('\\n[{:.2f} seconds] Done!'.format(time.time() - start_time))\n\n comments_in_db = collection.find({\"video_id\": video_id})\n comments_data = pd.DataFrame(list(comments_in_db)) \n client.close()\n return comments_data\n\n except Exception as e:\n print('Error:', str(e))\n sys.exit(1)\n\nif __name__ == '__main__':\n API_key = get_API_key(1)\n comments_df = get_comments_by_video(\"MGg318fNSDI\", 500, API_key)\n positive_nb, negative_nb = sentiment_analysis(comments_df)\n print(positive_nb, negative_nb)\n","sub_path":"scrap/comments.py","file_name":"comments.py","file_ext":"py","file_size_in_byte":13167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411686211","text":"from django.shortcuts import render\nimport re\nfrom .models import BoardUsers\nfrom django.views.generic import View\nimport time\n\n\nclass MessageView(View):\n\n def get(self, request):\n boards = BoardUsers.objects.all()\n context = {'boards': boards}\n return render(request, 'board_message.html', context)\n\n def 
post(self, request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0] # 所以这里是真实的ip\n else:\n ip = request.META.get('REMOTE_ADDR') # 这里获得代理ip\n pattern = re.compile(r\"\\w+Browser|Chrome\")\n getmessage = pattern.findall(request.META[\"HTTP_USER_AGENT\"])\n body = request.POST.get(\"body\")\n username = request.POST.get(\"username\")\n email = request.POST.get(\"email\")\n data = BoardUsers()\n data.username = username\n data.email = email\n data.comment = body\n data.ip = ip\n data.created_time = time.strftime('%Y-%m-%d %H:%M:%S.000000', time.localtime(time.time()))\n if getmessage:\n data.ie_browser = getmessage[0]\n data.save()\n return render(request, 'board_message.html')\n","sub_path":"django_02/mysite/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376612369","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: songyunlong\n@license: (C) Copyright 2020-2025.\n@contact: 1243049371@qq.com\n@software: pycharm\n@file: intToRoman\n@time: 2020/6/7 3:10 下午\n'''\n\nclass IntToRoman:\n table = {1:'I', 4:'IV', 5:'V', 9:'IX', 10:'X', 40:'XL', 50:'L', 90:'XC',\n 100:'C', 400:'CD', 500:'D', 900:'CM', 1000:'M'}\n lis = list(table.keys())\n def __init__(self, num:int):\n self._num = num\n self._roman = ''\n\n def find2f(self, left, right):\n mid = (left + right) // 2\n # if self._num == IntToRoman.lis[mid]:\n # self._roman += IntToRoman.table[self._num]\n if self._num >= IntToRoman.lis[mid]:\n if mid < len(IntToRoman.lis) - 1:\n if self._num < IntToRoman.lis[mid+1]:\n n = self._num // IntToRoman.lis[mid]\n self._roman += IntToRoman.table[IntToRoman.lis[mid]] * n\n self._num -= IntToRoman.lis[mid] * n\n if self._num != 0: #不用检查mid-1和left的关系,若num=0,其中包括了mid=0的情况\n self.find2f(left=0, right=mid - 1) # 已经确定新的待计算值比mid索引位置的值小\n elif self._num != 0:\n self.find2f(left=mid+1, right=right)\n\n else: # mid索引为最后一个值\n n = self._num // IntToRoman.lis[mid]\n self._roman += IntToRoman.table[IntToRoman.lis[mid]] * n\n self._num -= IntToRoman.lis[mid] * n\n if self._num != 0:\n self.find2f(left=0, right=mid - 1) # 已经确定新的待计算值比mid索引位置的值小\n\n elif self._num < IntToRoman.lis[mid]: #待计算值比mid索引位置的值小\n if self._num != 0:\n self.find2f(left=left, right=mid-1)\n\n def __enter__(self):\n self.find2f(left=0, right=len(IntToRoman.lis)-1)\n return self._roman\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\nif __name__ == '__main__':\n with IntToRoman(num=12) as i1:\n print(i1)\n with IntToRoman(num=1222) as i2:\n print(i2)\n with IntToRoman(num=4) as i3:\n print(i3)\n with IntToRoman(num=9) as i4:\n print(i4)\n with IntToRoman(num=58) as i5:\n print(i5)\n with IntToRoman(num=1994) as i6:\n print(i6)\n with IntToRoman(num=1000) as i7:\n print(i7)","sub_path":"No_12/intToRoman.py","file_name":"intToRoman.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"50773200","text":"\n# Standard\n\n# Third Party\nfrom django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\nfrom reversion.admin import VersionAdmin\n\n# Local\nfrom books.admin import Sellable\nfrom members.models import Tag, Pushover, Tagging, VisitEvent, \\\n Member, Membership, PaidMembershipNudge, GroupMembership, \\\n MemberNote, MemberLogin, MembershipGiftCardRedemption, \\\n MembershipGiftCard, 
MembershipGiftCardReference, DiscoveryMethod, WifiMacDetected\n\n\n@admin.register(Tag)\nclass TagAdmin(VersionAdmin):\n fields = ['name','meaning']\n\n\n@admin.register(Pushover)\nclass PushoverAdmin(VersionAdmin):\n list_display = ['pk', 'who', 'key']\n raw_id_fields = ['who']\n\n\n@admin.register(Tagging)\nclass TaggingAdmin(VersionAdmin):\n\n def members_username(self, object):\n return object.tagged_member.username\n members_username.admin_order_field = 'tagged_member__auth_user__username'\n raw_id_fields = ['tagged_member', 'authorizing_member']\n list_display = ['pk', 'tagged_member', 'members_username', 'tag', 'can_tag', 'date_tagged', 'authorizing_member']\n search_fields = [\n '^tagged_member__auth_user__first_name',\n '^tagged_member__auth_user__last_name',\n 'tag__name',\n '^tagged_member__auth_user__username',\n ]\n\n\n@admin.register(VisitEvent)\nclass VisitEventAdmin(admin.ModelAdmin): # No need to version events.\n ordering = ['-when']\n list_display = ['pk', 'when', 'who', 'event_type', 'method', 'sync1']\n readonly_fields = ['when', 'who', 'event_type', 'method', 'sync1']\n search_fields = [\n '^who__auth_user__first_name',\n '^who__auth_user__last_name',\n '^who__auth_user__username',\n ]\n list_filter = ['when']\n date_hierarchy = 'when'\n\n\n@admin.register(WifiMacDetected)\nclass WifiMacDetectedAdmin(admin.ModelAdmin): # No need to version events.\n list_display = ['pk', 'mac', 'when']\n list_filter = ['when']\n date_hierarchy = 'when'\n search_fields = ['mac']\n\n\nclass MemberTypeFilter(admin.SimpleListFilter):\n title = \"Worker Type\"\n parameter_name = 'type'\n\n def lookups(self, request, model_admin):\n return (\n ('worktrade', _('Work-Trader')),\n ('intern', _('Intern')),\n ('scholar', _('Scholarship')),\n )\n\n def queryset(self, request, queryset):\n if self.value() == 'worktrade': return queryset.filter(tags__name=\"Work-Trader\")\n if self.value() == 'intern': return queryset.filter(tags__name=\"Intern\")\n if self.value() == 'scholar': return queryset.filter(tags__name=\"Scholarship\")\n\n\nclass TaggingForMember(admin.TabularInline):\n model = Tagging\n fk_name = 'tagged_member'\n raw_id_fields = ['authorizing_member']\n # model._meta.verbose_name = \"Tag\"\n # model._meta.verbose_name_plural = \"Tags\"\n extra = 0\n\n\n@admin.register(Member)\nclass MemberAdmin(VersionAdmin):\n\n list_display = [\n 'pk',\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n # 'membership_card_when',\n # 'membership_card_md5'\n ]\n\n search_fields = [\n '^auth_user__first_name',\n '^auth_user__last_name',\n '^auth_user__username',\n 'auth_user__email',\n ]\n\n list_display_links = [\n 'pk',\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n ]\n\n list_filter = [MemberTypeFilter]\n\n inlines = [TaggingForMember]\n\n class Media:\n css = {\n \"all\": (\"abutils/admin-tabular-inline.css\",) # This hides \"denormalized object descs\", to use Wojciech's term.\n }\n\n\nMEMBERSHIP_TYPE_CODE2STR = {code: str for (code, str) in Membership.MEMBERSHIP_TYPE_CHOICES}\n\n\nclass PaymentLinkedFilter(admin.SimpleListFilter):\n title = \"Linked\"\n parameter_name = 'type'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', _('Yes')),\n ('no', _('No')),\n )\n\n def queryset(self, request, queryset):\n if self.value() == 'yes': return queryset.filter(member__isnull=False)\n if self.value() == 'no': return queryset.filter(member__isnull=True)\n\n\n# @admin.register(PaymentAKA)\n# class MemberAKAAdmin(VersionAdmin):\n# list_display = ['pk', 'member', 'aka']\n# 
raw_id_fields = ['member']\n\n\n@admin.register(PaidMembershipNudge)\nclass PaidMembershipNudgeAdmin(admin.ModelAdmin): # No need to version these\n list_display = ['pk', 'member', 'when']\n raw_id_fields = ['member']\n ordering = ['-when']\n\n\n@admin.register(MemberLogin)\nclass MemberLoginAdmin(VersionAdmin):\n list_display = ['pk', 'member', 'when', 'ip']\n raw_id_fields = ['member']\n ordering = ['-when']\n\n\n@admin.register(MembershipGiftCard)\nclass MembershipGiftCardAdmin(VersionAdmin):\n\n def sold(self, obj):\n ref = obj.membershipgiftcardreference\n if ref is None: return None\n return ref.sale.sale_date\n\n def created(self, obj):\n return obj.date_created\n\n def redeemed(self, obj):\n redemp = obj.membershipgiftcardredemption\n if redemp is None: return None\n return redemp.redemption_date\n\n list_display = [\n 'pk',\n 'redemption_code',\n 'price',\n 'month_duration',\n 'created',\n 'sold',\n 'redeemed',\n ]\n search_fields = [\n 'redemption_code',\n ]\n list_display_links = ['pk', 'redemption_code']\n list_filter = ['month_duration', 'price']\n ordering = ['redemption_code']\n\n\n# This is an Inline for MembershipGiftCardRedemptionAdmin\nclass MembershipInline(admin.StackedInline):\n model = Membership\n extra = 0\n fields = [\n 'member',\n 'membership_type',\n ('start_date', 'end_date'),\n ]\n raw_id_fields = ['member']\n\n\n@admin.register(MembershipGiftCardRedemption)\nclass MembershipGiftCardRedemptionAdmin(VersionAdmin):\n def members(self, obj):\n return \",\".join([str(mli.member) for mli in obj.membership_set.all()])\n\n list_display = ['pk', 'members', 'card']\n search_fields = ['card__redemption_code']\n inlines = [\n MembershipInline,\n ]\n raw_id_fields = ['card']\n\n\n@admin.register(Membership)\nclass MembershipAdmin(VersionAdmin):\n ordering = ['-start_date']\n date_hierarchy = 'start_date'\n list_filter = [\n 'membership_type',\n 'protected',\n ]\n\n def type_fmt(self,obj): return MEMBERSHIP_TYPE_CODE2STR[obj.membership_type]\n type_fmt.admin_order_field = 'membership_type'\n type_fmt.short_description = 'type'\n\n def src_fmt(self,obj):\n if obj.sale is not None: return str(obj.sale)\n if obj.redemption is not None: return \"Gift card \"+obj.redemption.card.redemption_code\n if obj.group is not None: return \"Member of {} group\".format(obj.group.group_tag)\n else: return None\n src_fmt.short_description = 'source'\n\n list_display = [\n 'pk',\n 'member',\n 'type_fmt',\n 'start_date',\n 'end_date',\n 'sale_price',\n 'src_fmt',\n ]\n\n fields = [\n 'member',\n 'membership_type',\n ('start_date', 'end_date'),\n 'sale_price',\n 'protected',\n 'ctrlid',\n ]\n\n readonly_fields = ['ctrlid']\n raw_id_fields = ['member']\n\n search_fields = [\n '^member__auth_user__first_name',\n '^member__auth_user__last_name',\n '^member__auth_user__username',\n 'sale__payer_name',\n 'sale__payer_email',\n ]\n\n\n@admin.register(DiscoveryMethod)\nclass DiscoveryMethodAdmin(VersionAdmin):\n list_display = ['pk', 'order', 'name']\n ordering = ['order']\n\n\n# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n# Line-Item Inlines for SaleAdmin in Books app.\n# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n@Sellable(MembershipGiftCardReference)\nclass MembershipGiftCardLineItem(admin.StackedInline):\n fields = [\n 'sale_price',\n 'card',\n ]\n extra = 0\n raw_id_fields = ['card']\n\n\n@Sellable(Membership)\nclass MembershipLineItem(admin.StackedInline):\n extra = 0\n fields = [\n 'sale_price',\n 'member',\n 
'membership_type',\n ('start_date', 'end_date'),\n ]\n raw_id_fields = ['member']\n\n\n@Sellable(GroupMembership)\nclass GroupMembershipLineItem(admin.StackedInline):\n extra = 0\n fields = [\n 'sale_price',\n 'group_tag',\n ('start_date', 'end_date'),\n 'max_members',\n ]\n\n\n","sub_path":"members/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"8654601","text":"#!/usr/bin/env python3\n# Author: Sara Mirzaee\n\nimport os\nimport numpy as np\nimport argparse\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.csgraph import minimum_spanning_tree\n#import matplotlib.pyplot as plt\n\ndef cmd_line_parse(iargs=None):\n parser = argparse.ArgumentParser(description='find the minimum number of connected good interferograms')\n parser.add_argument('-b', '--baselineDir', dest='baseline_dir', type=str, help='Baselines directory')\n parser.add_argument('-o', '--outFile', dest='out_file', type=str, default='./bestints.txt', help='Output text file')\n parser.add_argument('-t', '--temporalBaseline', dest='t_threshold', default=2, type=int,\n help='Number of sequential interferograms to consider')\n parser.add_argument('-p', '--perpBaseline', dest='p_threshold', default=200, type=int,\n help='Perpendicular baseline threshold')\n parser.add_argument('-d', '--date_list', dest='date_list', default=None, type=str,\n help='Text file having existing SLC dates')\n parser.add_argument('--MinSpanTree', dest='min_span_tree', action='store_true',\n help='Keep minimum spanning tree pairs')\n\n\n inps = parser.parse_args(args=iargs)\n return inps\n\n\ndef find_baselines(iargs=None):\n inps = cmd_line_parse(iargs)\n\n bf = os.listdir(inps.baseline_dir)\n if not bf[0].endswith('txt'):\n bf2 = ['{}/{}.txt'.format(d, d) for d in bf]\n else:\n bf2 = bf\n \n baselines = []\n for d in bf2:\n with open(os.path.join(inps.baseline_dir, d), 'r') as f:\n lines = f.readlines()\n if len(lines) != 0:\n if 'Bperp (average):' in lines[1]:\n baseline = lines[1].split('Bperp (average):')[1]\n else:\n baseline = lines[1].split('PERP_BASELINE_TOP')[1]\n baselines.append(baseline)\n\n baselines = [float(x.split('\\n')[0].strip()) for x in baselines]\n reference = bf[0].split('_')[0]\n dates = [x.split('_')[1].split('.')[0] for x in bf]\n dates.append(reference)\n baselines.append(0)\n if not inps.date_list is None:\n with open(inps.date_list, 'r') as fr:\n date_list = fr.readlines()\n date_list = [x.split('\\n')[0] for x in date_list]\n for i, date in enumerate(dates):\n if date not in date_list:\n del dates[i]\n del baselines[i]\n\n db_tuples = [(x, y) for x, y in zip(dates, baselines)]\n db_tuples.sort()\n\n dates = [x[0] for x in db_tuples]\n baselines = [x[1] for x in db_tuples]\n\n q = np.zeros([len(dates), len(dates)])\n\n for i, ds in enumerate(dates):\n t = np.arange(i - inps.t_threshold, i + inps.t_threshold)\n t = t[t >= 0]\n t = t[t < len(dates)]\n if len(t) > 0:\n for m in range(t[0], t[-1] + 1):\n q[i, m] = np.abs(baselines[i] - baselines[m])\n q[m, i] = q[i, m]\n\n #if len(t) > 3:\n # ss = np.where(q[i, :] == np.max(q[i, :]))[0]\n # q[i,ss]=0\n\n q[q > inps.p_threshold] = 0\n\n if inps.min_span_tree:\n X = csr_matrix(q)\n Tcsr = minimum_spanning_tree(X)\n A = Tcsr.toarray()\n else:\n for i in range(len(dates)):\n if len(np.nonzero(q[i, :])[0]) <= 1:\n q[i, :] = 0\n A = np.triu(q)\n\n \n ind1, ind2 = np.where(A > 0)\n intdates = ['{}_{}\\n'.format(dates[g], dates[h]) for g, h in zip(ind1, 
ind2)]\n intdates_test = ['{}_{}, {}, {}, {}\\n'.format(dates[g], dates[h], str(baselines[g]),\n str(baselines[h]), str(baselines[g] - baselines[h]))\n for g, h in zip(ind1, ind2)]\n\n with open(inps.out_file, 'w') as f:\n f.writelines(intdates)\n\n plot_baselines(intdates_test, os.path.join(os.path.dirname(inps.out_file)))\n\n return\n\n\ndef plot_baselines(ifgdates, out_dir):\n import matplotlib.pyplot as plt\n from datetime import datetime\n\n fig = plt.figure(figsize=(8, 4))\n\n dates = [x.split(',')[0].split('_') for x in ifgdates]\n baselines = [x.split('\\n')[0].split(',')[1:3] for x in ifgdates]\n\n for d, b in zip(dates, baselines):\n x1 = datetime.strptime(d[0], '%Y%m%d')\n x2 = datetime.strptime(d[1], '%Y%m%d')\n y1 = float(b[0])\n y2 = float(b[1])\n plt.plot([x1, x2], [y1, y2], '*-')\n\n fig.savefig(out_dir + '/unwrap_network.png', bbox_inches='tight', dpi=150)\n plt.close(fig)\n\n return\n\nif __name__ == '__main__':\n find_baselines()\n\n\n","sub_path":"minopy/find_short_baselines.py","file_name":"find_short_baselines.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"427259212","text":"import pygame\nimport time\n\n\nclass Player(pygame.sprite.Sprite):\n player_head = pygame.image.load('sprites/snake_head.png')\n player_body0 = pygame.image.load('sprites/snake_body_up.png')\n player_body2 = pygame.image.load('sprites/snake_body_down.png')\n player_body3 = pygame.image.load('sprites/snake_body_r.png')\n player_body1 = pygame.image.load('sprites/snake_body_l.png')\n player_tile = pygame.image.load('sprites/snake_tile.png')\n player_img = [player_head, player_body0, player_tile]\n\n def __init__(self, window, ai, x_pos=200, y_pos=200):\n super().__init__()\n self.window = window\n self.ai = ai\n self.direction = 0\n self.move = [0, -15]\n self.rect = pygame.Rect(x_pos, y_pos, 15, 15)\n self.snake_body = [[self.rect.x, self.rect.y, self.direction],\n [self.rect.x, self.rect.y + 15, self.direction],\n [self.rect.x, self.rect.y + 30, self.direction]]\n self.image0 = pygame.transform.scale(Player.player_head, (15, 15))\n self.image = self.image0\n self.mask = pygame.mask.from_surface(self.image)\n\n def move_snake(self):\n self.snake_body.insert(0, list([self.rect.x, self.rect.y, self.direction]))\n self.snake_body.insert(0, list([self.rect.x, self.rect.y, self.direction]))\n print(self.snake_body)\n self.snake_body.pop()\n\n def rotate(self, image, degree):\n return pygame.transform.rotate(image, degree)\n\n def screen_saver_crashed(self):\n if self.ai.score[1] > 1:\n self.ai.score[1] -= self.ai.lost_screen(self.window, 'Ой-Ой!')\n self.snake_body = [[self.rect.x, self.rect.y, self.direction],\n [self.rect.x, self.rect.y + 15, self.direction],\n [self.rect.x, self.rect.y + 30, self.direction]]\n return -1\n else:\n self.ai.lost_screen(self.window, 'Game Over')\n\n def update(self, *args):\n print(self.rect.x, self.rect.y)\n self.move_snake()\n self.draw_snake()\n if self.move[0] < 0:\n self.direction = 1\n self.image = self.rotate(self.image0, 90)\n if self.move[0] > 0:\n self.direction = 3\n self.image = self.rotate(self.image0, 270)\n if self.move[1] < 0:\n self.direction = 0\n self.image = self.rotate(self.image0, 0)\n if self.move[1] > 0:\n self.direction = 2\n self.image = self.rotate(self.image0, 180)\n if 0 < self.rect.x < self.ai.width - 30 and 5 < self.rect.y < self.ai.height - 35:\n self.rect = self.rect.move(self.move[0], self.move[1])\n # проверка на столкновение с собой\n 
for block in self.snake_body[1:]:\n if block[0] == self.rect.x and block[1] == self.rect.y:\n if self.screen_saver_crashed() == -1:\n break\n # проверка на столкновение с границами поля\n if self.rect.y <= 5 or self.rect.y >= self.ai.height - 35 or\\\n self.rect.x <= 0 or self.rect.x >= self.ai.width - 30:\n self.screen_saver_crashed()\n if self.rect.y <= 5:\n self.image = self.rotate(self.image0, 180)\n self.rect.y = 6\n self.move[1] = 15\n if self.rect.y >= self.ai.height - 35:\n self.image = self.rotate(self.image0, 0)\n self.rect.y = self.ai.height - 36\n self.move[1] = -15\n if self.rect.x <= 0:\n self.image = self.rotate(self.image0, 270)\n self.rect.x = 1\n self.move[0] = 15\n if self.rect.x >= self.ai.width - 30:\n self.image = self.rotate(self.image0, 90)\n self.rect.x = self.ai.width - 31\n self.move[0] = -15\n\n def draw_snake(self):\n \"\"\"Отображаем все сегменты змеи\"\"\"\n self.image_body0 = pygame.transform.scale(Player.player_body0, (15, 15))\n self.image_body2 = pygame.transform.scale(Player.player_body2, (15, 15))\n self.image_body3 = pygame.transform.scale(Player.player_body3, (15, 15))\n self.image_body1 = pygame.transform.scale(Player.player_body1, (15, 15))\n\n for pos in self.snake_body[1:-1]:\n if pos[2] == 0:\n self.image_ = self.image_body0\n if pos[2] == 2:\n self.image_ = self.image_body2\n if pos[2] == 1:\n self.image_ = self.image_body1\n if pos[2] == 3:\n self.image_ = self.image_body3\n #pygame.draw.circle(self.window, (0, 230, 0, 255), (pos[0], pos[1]), 10)\n self.window.blit(self.image_, pygame.Rect(pos[0], pos[1], 15, 15))\n\n self.image_tile0 = pygame.transform.scale(Player.player_tile, (15, 15))\n self.image_tile = self.image_tile0\n self.image_ = self.rotate(self.image_tile, self.snake_body[-1][2] * 90)\n self.window.blit(self.image_, pygame.Rect(self.snake_body[-1][0], self.snake_body[-1][1], 15, 15))\n\n\n\n\n","sub_path":"proekt_2021/bug.py","file_name":"bug.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"162707014","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom Tkinter import *\nimport tkFileDialog\nimport re\nimport tkFont\nimport tkMessageBox\nimport math\nimport os\nfrom itertools import izip\nimport moduloalineador\nimport modulopreprocesamiento\nimport modulovalidaralineacion\nimport moduloalineacion\n\nsourcecorpus=\"\"\ntargetcorpus=\"\"\n\nsalida=\"\"\n\nrutaguardarsource=\"\"\nrutaguardartarjet=\"\"\n\nprocsource=\"\"\nproctarjet=\"\"\n\nrutasalidaeval=\"\"\n\nventana = Tk()\n\ndef abrirTextoEspaniol():\n global sourcecorpus\n sourcecorpus=tkFileDialog.askopenfilenames()\n textoespn_str.set(sourcecorpus)\n return sourcecorpus\n\ndef abrirTextoMixteco():\n global targetcorpus\n targetcorpus=tkFileDialog.askopenfilenames()\n textomixteco_str.set(targetcorpus)\n return targetcorpus \n\ncaracteres=[\"('\",\"',)\"]\ndef borrarCaracteres(texto):\n for caracter in caracteres:\n texto=texto.replace(caracter,\"\")\n return texto\n \ndef guardarCorpus():\n global salida\n salida=tkFileDialog.asksaveasfilename()\n textosalida_str.set(salida)\n return salida\n\ndef limpiaralineacion():\n moduloalineador.eliminarTablaAlineaciones()\n \ndef limpiarpreprocesado():\n modulopreprocesamiento.limpiartablacorpusespaniol()\n modulopreprocesamiento.limpiartablacorpusmixteco()\n tkMessageBox.showinfo(\"Información\",\"Los textos se han limpiado\")\n \ndef alinear():\n global sourcecorpus\n global targetcorpus\n global salida\n 
print(sourcecorpus)\n print(targetcorpus)\n print(salida)\n origen=borrarCaracteres(str(sourcecorpus))\n destino=borrarCaracteres(str(targetcorpus))\n memoria=borrarCaracteres(str(salida))\n \n if memoria==\"\":\n tkMessageBox.showwarning(\"Advertencia\",\"Por favor, seleccione su ruta para guardar\")\n \n else:\n \n modulopreprocesamiento.segmentarCorpusEspaniol(origen)\n modulopreprocesamiento.segmentarCorpusMixteco(destino)\n #guardando los procesados a bd\n modulopreprocesamiento.guardarPreprocesadoespaniol()\n modulopreprocesamiento.guardarPreprocesadomixteco()\n \n moduloalineador.principal(memoria)\n tkMessageBox.showinfo(\"Alinear\",\"Textos alineados correctamente\")\n \n\nventana.title(\"Sistema de alineación automático español-mixteco VE'E SAVI\")\n\n#ventana.config(bg=\"#c1bfea\")\n#ventana.geometry(\"500x300\")\nventana.resizable(width=FALSE, height=FALSE)\n#ventana.resizable(width=500,height=500)\nventana.winfo_screenwidth()\nArial = tkFont.Font(family=\"Arial\", size=12, weight=\"bold\")\n\n\nrutaespnproc_label = Label(ventana,text=\"Archivo de texto en español:\",font=Arial)\nrutaespnproc_label.grid(row=0, column=0)\n\ntextoespn_str = StringVar()\ntxtespnproc_entry = Entry(ventana,textvariable=textoespn_str,font=Arial)\n#txtespn_entry = Label(ventana,text=\" \",font=Arial)\ntxtespnproc_entry.grid(row=0, column=1,ipadx=6, ipady=6)\ntxtespnproc_entry.configure(width=40)\n\nbtnAbrirespn = Button(ventana,text=\"Seleccionar\",background=\"#207ce5\",foreground=\"#fff\",anchor=\"center\",font=Arial,command=abrirTextoEspaniol)\nbtnAbrirespn.grid(row=0, column=2,ipadx=6, ipady=6)\n\nrutamixtecoproc_label= Label(ventana,text=\"Archivo de texto en mixteco:\",font=Arial)\nrutamixtecoproc_label.grid(row=1, column=0)\n\ntextomixteco_str = StringVar()\ntxtmixtecoproc_entry = Entry(ventana,textvariable=textomixteco_str,font=Arial)\ntxtmixtecoproc_entry.grid(row=1, column=1,ipadx=6, ipady=6)\ntxtmixtecoproc_entry.configure(width=40)\n\nbtnrutamixteco = Button(ventana,text=\"Seleccionar\", background=\"#207ce5\",anchor=\"center\",font=Arial,foreground=\"#fff\",command=abrirTextoMixteco)\nbtnrutamixteco.grid(row=1, column=2,ipadx=6, ipady=6)\n\nbtnalinear = Button(ventana,text=\"Alinear\", background=\"#207ce5\",anchor=\"center\",font=Arial,foreground=\"#fff\",command=alinear)\nbtnalinear.grid(row=2, column=1,ipadx=6, ipady=6)\n\nrutasalidamixteco_label= Label(ventana,text=\"Ruta de salida:\",font=Arial)\nrutasalidamixteco_label.grid(row=3, column=0)\n\ntextosalida_str = StringVar()\ntextosalida_entry = Entry(ventana,textvariable=textosalida_str,font=Arial)\ntextosalida_entry.grid(row=3, column=1,ipadx=6, ipady=6)\ntextosalida_entry.configure(width=40)\n\nbtnsalida = Button(ventana,text=\"Guardar\", background=\"#207ce5\",anchor=\"center\",font=Arial,foreground=\"#fff\",command=guardarCorpus)\nbtnsalida.grid(row=3, column=2,ipadx=6, ipady=6)\nbtnsalida.configure()\n\nventana.mainloop()\n","sub_path":"src/alineadorveesavi.py","file_name":"alineadorveesavi.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176462410","text":"import math\nimport time\nimport torch\nimport torch.nn as nn\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass TransformerModel(nn.Module):\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.2):\n super(TransformerModel, self).__init__()\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n self.model_type = 
'Transformer'\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self, initrange=0.1):\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.zero_()\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, src):\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n device = src.device\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n src = self.encoder(src) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, self.src_mask)\n output = self.decoder(output)\n return output\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\ndef batchify(data, btchsz):\n data = tokenizer(data, mode='encode')\n # Divide the dataset into btchsz parts.\n nbatch = data.size(0) // btchsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * btchsz)\n # Evenly divide the data across the btchsz batches.\n data = data.view(btchsz, -1).t().contiguous()\n return data.to(device)\n\n\ndef get_batch(source, i, bptt=35):\n seq_len = min(bptt, len(source) - 1 - i)\n data = source[i:i + seq_len]\n target = source[i + 1:i + 1 + seq_len].view(-1)\n return data, target\n\n\ndef pad(sequences, maxlen=0):\n if maxlen == 0:\n length = max(map(len, sequences))\n else:\n sequences = filter(lambda x: len(x) < maxlen+1, sequences)\n length = maxlen\n return ['^' + seq + ' ' * (length - len(seq)) for seq in sequences]\n\n\ndef tokenizer(arr, mode='encode'):\n itos = {\"0\": 'H', \"1\": '9', \"2\": 'D', \"3\": 'r', \"4\": 'T', \"5\": 'R', \"6\": 'V', \"7\": '4', \"8\": 'c', \"9\": 'l',\n \"10\": 'b', \"11\": '.', \"12\": 'C', \"13\": 'Y', \"14\": 's', \"15\": 'B', \"16\": 'k', \"17\": '+', \"18\": 'p',\n \"19\": '2', \"20\": '7', \"21\": '8', \"22\": 'O', \"23\": '%', \"24\": 'o', \"25\": '6', \"26\": 'N', \"27\": 'A',\n \"28\": 't', \"29\": '$', \"30\": '(', \"31\": 'u', \"32\": 'Z', \"33\": '#', \"34\": 'M', \"35\": 'P', \"36\": 'G',\n \"37\": 'I', \"38\": '=', \"39\": '-', \"40\": 'X', \"41\": '@', \"42\": 'E', \"43\": ':', \"44\": '\\\\', \"45\": ')',\n \"46\": 'i', \"47\": 'K', \"48\": '/', \"49\": '{', \"50\": 'h', \"51\": 'L', \"52\": 'n', \"53\": 'U', \"54\": '[',\n \"55\": '0', \"56\": 'y', \"57\": 'e', \"58\": '3', \"59\": 'g', \"60\": 'f', \"61\": '}', \"62\": '1', \"63\": 'd',\n \"64\": 'W', \"65\": '5', \"66\": 
'S', \"67\": 'F', \"68\": ']', \"69\": 'a', \"70\": 'm', \"71\": '^', \"72\": ' '}\n stoi = {v: k for k, v in itos.items()}\n if mode == 'encode':\n return torch.tensor([[stoi[x] for x in ex] for ex in arr], dtype=torch.long, device=device).contiguous()\n elif mode == 'decode':\n return [''.join([x for x in ex]).strip().replace('^', '') for ex in arr]\n else:\n raise NotImplementedError(\"Only 'encode' and 'decode are available as modes!\")\n\n\ndef train(epoch, model, optimizer, train_data, scheduler, criterion=nn.CrossEntropyLoss(), bptt=35, ntok=73):\n model.train() # Turn on the train mode\n total_loss = 0.\n start_time = time.time()\n for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):\n data, targets = get_batch(train_data, i)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output.view(-1, ntok), targets)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n optimizer.step()\n\n total_loss += loss.item()\n log_interval = 200\n if batch % log_interval == 0 and batch > 0:\n cur_loss = total_loss / log_interval\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d}/{:5d} batches | '\n 'lr {:02.2f} | ms/batch {:5.2f} | '\n 'loss {:5.2f} | ppl {:8.2f}'\n .format(epoch, batch, len(train_data) // bptt, scheduler.get_lr()[0], elapsed * 1000 / log_interval,\n cur_loss, math.exp(cur_loss)))\n total_loss = 0\n start_time = time.time()\n\n\ndef evaluate(eval_model, data_source, criterion=nn.CrossEntropyLoss(), bptt=35, ntokens=73):\n eval_model.eval() # Turn on the evaluation mode\n total_loss = 0.\n with torch.no_grad():\n for i in range(0, data_source.size(0) - 1, bptt):\n data, targets = get_batch(data_source, i)\n output = eval_model(data)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * criterion(output_flat, targets).item()\n return total_loss / (len(data_source) - 1)\n\n\n\n\n# batch_size = 20\n# eval_batch_size = 10\n# train_data = batchify(train_txt, batch_size)\n# val_data = batchify(val_txt, eval_batch_size)\n# test_data = batchify(test_txt, eval_batch_size)\n#\n# emsize = 200 # embedding dimension\n# nhid = 200 # the dimension of the feedforward network model in nn.TransformerEncoder\n# nlayers = 2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder\n# nhead = 2 # the number of heads in the multiheadattention models\n# dropout = 0.2 # the dropout value\n# model = TransformerModel(73, emsize, nhead, nhid, nlayers, dropout).to(device)\n#\n# lr = 5.0 # learning rate\n# optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)\n#\n# best_val_loss = float(\"inf\")\n# epochs = 3 # The number of epochs\n# best_model = None\n#\n# for epoch in range(1, epochs + 1):\n# epoch_start_time = time.time()\n# train()\n# val_loss = evaluate(model, val_data)\n# print('-' * 89)\n# print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n# 'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss)))\n# print('-' * 89)\n#\n# if val_loss < best_val_loss:\n# best_val_loss = val_loss\n# best_model = model\n#\n# scheduler.step()\n\n\n","sub_path":"torch_transformer.py","file_name":"torch_transformer.py","file_ext":"py","file_size_in_byte":7600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"304894186","text":"'''\nDaily after each 1 minutes\n'''\n\nimport json\nimport time\nimport os.path\nfrom os import path\nimport sys\nimport 
datetime\nimport random\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\nuser_id = 179880094\n\ndriver = webdriver.Chrome('./chromedriver')\ndriver.get('https://www.zomato.com/webroutes/user/network?page=1&userId=%d&type=following' % (user_id))\ndata = driver.find_element_by_xpath(\"/html/body/pre\").text\njson_data = json.loads(data)\ntotal_pages = json_data['sections']['SECTION_USER_FOLLOWER']['following']['totalPages']\n\n# Fetch last processed page number\nuser_pages_processed_file_path = \"to_be_follow_page_processed.txt\"\nstart_page = 1\nif path.exists(user_pages_processed_file_path):\n    read_file = open(user_pages_processed_file_path, \"r\")\n    file_content = read_file.read()\n    if len(file_content) > 0:\n        start_page = int(file_content)\n        start_page = start_page + 1\n\nlogged = False\nfor page in range(start_page, (total_pages+1)):\n    print('Start Processing Page : %d' % (page))\n    driver.get('https://www.zomato.com/webroutes/user/network?page=%d&userId=%d&type=following' % (page, user_id))\n    data = driver.find_element_by_xpath(\"/html/body/pre\").text\n    json_data = json.loads(data)\n    followers = json_data['entities']['USER']\n    print(followers)\n\n    for value in followers.keys():\n        print(followers[value]['profile_url'])\n\n        # Save user profile url in file\n        followed_users_records_file = open('shrutu_following_179880094.txt', 'a')\n        followed_users_records_file.write('\\n%s' % (followers[value]['profile_url']))\n        followed_users_records_file.close()\n\n        time_sleep = random.randint(2, 4)\n        # print(\"Wait for %d secs...\" % (time_sleep))\n        # time.sleep(time_sleep)\n\n    # Save page number\n    file1 = open('to_be_follow_page_processed.txt', \"w\")  # write mode\n    file1.write('%d' % (page))\n    file1.close()","sub_path":"fetch_followings.py","file_name":"fetch_followings.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"443605703","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\n\nargs = sys.argv\nn_args = len(args[1:])\n\nassert (n_args==3), \"Please give \`$(pwd)\`, data folder name, and the number from \`CollectAndSortFiles.sh\`.\"\n\nproject_folder = args[1]\ntheName = args[2]\n\nimport re\nproject_folder = re.sub('PythonScripts$', '', project_folder)\n\n# The \`.csv\`-file will be in path:\npath2csv = project_folder + \"{}/Data_Collection.csv\".format(theName)\npath2out = project_folder + \"{}/outputs/\".format(theName)\n\nn_perm = int(args[3]) # Number of ways in which to combine the protonation states of the single molecules forming a neutral cluster\n\nprint(\"Project folder:\",project_folder)\nprint(\"Data folder:\",theName)\nprint(\"CSV file:\",path2csv)\nprint(\"Permutations:\",n_perm)\nprint(\"\"\"\n\nWelcome to Clusterin'Clusters!\nPlease enjoy your coffee while we take care of your clusters...\n\nYou will find the results in {}\n\n\"We're a lighthouse, your call.\"\n\n\"\"\".format(path2out))\n","sub_path":"Archive/testCluster.py","file_name":"testCluster.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"369966798","text":"import re\nimport pandas as pd\nfrom progressbar import ProgressBar\nfrom nltk.tokenize import word_tokenize\n\ndef load_biocreative(bcfilepath='./data/train/train.in', bcevalpath='./data/train/GENE.eval', procout='./data/processed.txt'):\n    \"\"\" Function to parse and format the biocreative dataset.\n    Arguments:\n
bcfilepath: file path to the train/test corpus.\n bcevalpath: file path to the labels.\n procout: file path to save the processed output text file.\"\"\"\n\n bcfile = open(bcfilepath)\n\n # list to contain text information\n templist = []\n\n for line in bcfile.readlines():\n linere = re.match(r'[^\\s]+', line)\n lineid = linere.group(0)\n txtstart = linere.span()[1]+1\n text = line[txtstart:].replace('\\n', '')\n text = word_tokenize(text)\n templist.append([lineid, text])\n\n bcfile.close()\n tempdf = pd.DataFrame(templist, columns=['id', 'text'])\n\n # calculating indexes of each word. this will be used to join with label data later\n tempdf = tempdf.explode('text')\n tempdf['textlen'] = tempdf['text'].apply(len)\n tempg = tempdf.groupby('id')\n tempdf['cummulative'] = tempg['textlen'].cumsum()\n tempdf['textpos'] = tempg['cummulative'].rank(ascending='True')\n tempdf['textind'] = tempdf.cummulative - tempdf.textlen\n tempdf.textind = tempdf.textind.astype(int)\n\n bceval = open(bcevalpath, encoding='utf8')\n # list to contain label data\n evaltemp = []\n for line in bceval.readlines():\n lineid = re.match(r'[^|]+', line).group(0)\n entstart = re.findall(r'\\|(\\d*)\\s', line)[0]\n entend = re.findall(r'\\s(\\d*)\\|', line)[0]\n # entstart = re.match(r'\\d*', line[15:]).group(0)\n # entend = re.match(r'\\d*', line[15+len(entstart)+1:]).group(0)\n indtilnow = len(lineid+entstart+entend)+3\n entstart = int(entstart)\n entend = int(entend)\n ent = line[indtilnow:].replace('\\n','').split(' ')\n evaltemp.append([lineid, entstart, entend, ent])\n\n bceval.close()\n\n evaldf = pd.DataFrame(evaltemp, columns=['id', 'entity_start', 'entity_end', 'entity'])\n evaldf = evaldf.explode('entity')\n evaldf['ent_len'] = evaldf['entity'].apply(len)\n # calculating indexes to help with joining later\n evaldf = evaldf.sort_values(['id', 'entity_start', 'entity_end'])\n evalg = evaldf.groupby(['id', 'entity_start', 'entity_end'])\n evaldf['cummulative'] = evalg['ent_len'].cumsum()\n evaldf['entpos'] = evalg['cummulative'].rank(ascending=True)\n evaldf['entind'] = evaldf.entity_start + evaldf.cummulative - evaldf.ent_len\n evaldf.entind = evaldf.entind.astype(int)\n evaldf['label'] = evaldf.apply(lambda x: 'B' if x.entpos == 1.0 else 'I', axis=1)\n\n tempdf['id'] = tempdf['id'].astype(str)\n evaldf['id'] = evaldf['id'].astype(str)\n\n # joining per text id and index of entity\n finaldf = pd.merge(tempdf, evaldf,\n how='left',\n left_on=['id', 'textind'],\n right_on=['id', 'entind'],\n suffixes=('_t', '_e'))\n # text not in the evaluated data are considered to O (outside)\n finaldf.label = finaldf['label'].fillna('O')\n finaldf = finaldf[['id', 'text', 'textind', 'entity', 'entind','label', 'entity_start', 'entity_end']]\n\n towritefile = open(procout, 'w+')\n\n pbar = ProgressBar()\n for pid in pbar(finaldf.id.unique()):\n for tx, lb in finaldf[finaldf.id == pid][['text', 'label']].values:\n towritefile.write('{}\\t{}'.format(tx, lb))\n towritefile.write('\\n')\n towritefile.write('\\n')\n\n towritefile.close()\n\n\n\ndef load_data(training_file_path):\n sentence_tokens = []\n sentence_categories = []\n training_data=[]\n for line in open(training_file_path, encoding='utf8').readlines():\n stripped = line.strip()\n if stripped:\n token, category = stripped.split('\\t')\n sentence_tokens.append(token)\n sentence_categories.append(category)\n else:\n sentence = list(zip(sentence_tokens, sentence_categories))\n training_data.append(sentence)\n sentence_tokens = []\n sentence_categories = []\n return 
training_data\n\ndef word2features(sent, i):\n word = sent[i][0]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word_suffix': word[-3:],\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n }\n if i > 0:\n word1 = sent[i-1][0]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n })\n else:\n features['BOS'] = True\n\n if i < len(sent)-1:\n word1 = sent[i+1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n })\n else:\n features['EOS'] = True\n\n return features\n\ndef newword2features(sent, i):\n word = sent[i][0]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word_suffix': word[-3:],\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.isalnum()': word.isalnum(),\n 'word.endcaps': word[-1].isupper(),\n 'word.hasslash': '/' in word\n }\n if i > 0:\n word1 = sent[i-1][0]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isalnum()': word1.isalnum(),\n '-1:word.endcaps': word1[-1].isupper(),\n '-1:word.hasslash': '/' in word1\n })\n else:\n features['BOS'] = True\n\n if i < len(sent)-1:\n word1 = sent[i+1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.endcaps': word1[-1].isupper(),\n '+1:word.hasslash': '/' in word1\n })\n else:\n features['EOS'] = True\n\n return features\n\ndef sent2labels(sent):\n return [label for token, label in sent]\n\ndef sent2features(sent):\n return [word2features(sent, i) for i in range(len(sent))]\n\ndef newsent2features(sent):\n return [newword2features(sent, i) for i in range(len(sent))]\n\ndef text2features(intext):\n sent = [(w, 'O') for w in word_tokenize(intext)]\n return [word2features(sent, i) for i in range(len(sent))]\n\ndef newtext2features(intext):\n sent = [(w, 'O') for w in word_tokenize(intext)]\n return [newword2features(sent, i) for i in range(len(sent))]\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":6929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"432280829","text":"from tqdm import tqdm\nfrom kaldiio import ReadHelper\nimport pickle\nimport sys\n\n\ndef read_feature_data(file):\n keys = []\n audio = []\n with ReadHelper(file) as reader:\n for key, array in reader:\n audio.append(array)\n keys.append(key)\n return keys, audio\n\nif __name__ == '__main__':\n\n base_path = '/home/ubuntu/data_wsj/'\n\n audio_feat_paths = ['train_si284', 'test_eval92', 'test_dev93']\n types = ['train', 'eval', 'dev']\n\n for data_type, audio_feat_path in zip(types, audio_feat_paths):\n\n file = 'scp:'+ base_path + audio_feat_path + '/feats.scp'\n print(file)\n\n keys, audio_feat = read_feature_data(file)\n with open(base_path + data_type+'_data.pkl', 'wb') as f:\n pickle.dump(audio_feat, f)\n with open(base_path + data_type+'_key.pkl', 'wb') as f:\n pickle.dump(keys, f)\n\n\n\n \n","sub_path":"preprocessing_audio.py","file_name":"preprocessing_audio.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"396834946","text":"\"\"\"\nparams.py \n\nnetParams is a dict containing a set of network parameters using a standardized structure\n\nsimConfig is a dict containing a set of simulation configurations using a standardized structure\n\nContributors: salvadordura@gmail.com\n\"\"\"\n\nnetParams = {} # dictionary to store sets of network parameters\nsimConfig = {} # dictionary to store sets of simulation configurations\n\n\n###############################################################################\n#\n# MPI HH TUTORIAL PARAMS\n#\n###############################################################################\n\n###############################################################################\n# NETWORK PARAMETERS\n###############################################################################\n\n# Cell properties list\nnetParams['cellParams'] = []\n\n## PYR cell properties\ncellRule = {'label': 'PYR', 'conditions': {'cellType': 'PYR'}, 'sections': {}}\nsoma = {'geom': {}, 'topol': {}, 'mechs': {}, 'syns': {}} # soma properties\nsoma['geom'] = {'diam': 18.8, 'L': 18.8, 'Ra': 123.0}\nsoma['mechs']['hh'] = {'gnabar': 0.12, 'gkbar': 0.036, 'gl': 0.003, 'el': -70} \nsoma['syns']['NMDA'] = {'_type': 'Exp2Syn', '_loc': 0.5, 'tau1': 0.1, 'tau2': 5, 'e': 0}\n\ncellRule['sections'] = {'soma': soma} # add sections to dict\nnetParams['cellParams'].append(cellRule) # add dict to list of cell properties\n\n\n# Population parameters\nnetParams['popParams'] = [] # create list of populations - each item will contain dict with pop params\nnetParams['popParams'].append({'popLabel': 'PYR', 'cellModel': 'HH', 'cellType': 'PYR', 'numCells': 500}) # add dict with params for this pop \nnetParams['popParams'].append({'popLabel': 'background', 'cellModel': 'NetStim', 'rate': 10, 'noise': 0.5, 'source': 'random'}) # background inputs\n\nnetParams['popTagsCopiedToCells'] = ['popLabel', 'cellModel', 'cellType']\n\n\n# Connectivity parameters\nnetParams['connParams'] = [] \n\nnetParams['connParams'].append(\n {'preTags': {'popLabel': 'PYR'}, 'postTags': {'popLabel': 'PYR'},\n 'weight': 0.001, # weight of each connection\n 'delay': '0.2+gauss(13.0,1.4)', # delay min=0.2, mean=13.0, var = 1.4\n 'threshold': 10, # threshold\n 'convergence': 'uniform(1,15)'}) # convergence (num presyn targeting postsyn) is uniformly distributed between 1 and 15\n\nnetParams['connParams'].append(\n {'preTags': {'popLabel': 'background'}, 'postTags': {'cellType': 'PYR'}, # background -> PYR\n 'connFunc': 'fullConn', # all-to-all (can omit this param)\n 'weight': 0.008, # fixed weight of 0.08\n 'syn': 'NMDA', # target NMDA synapse\n 'delay': 'uniform(1,5)'}) # uniformly distributed delays between 1-5ms\n\n\n###############################################################################\n# SIMULATION PARAMETERS\n###############################################################################\n\nsimConfig = {} # dictionary to store simConfig\n\n# Simulation parameters\nsimConfig['duration'] = 1*1e3 # Duration of the simulation, in ms\nsimConfig['dt'] = 0.025 # Internal integration timestep to use\nsimConfig['randseed'] = 1 # Random seed to use\nsimConfig['createNEURONObj'] = 1 # create HOC objects when instantiating network\nsimConfig['createPyStruct'] = 1 # create Python structure (simulator-independent) when instantiating network\nsimConfig['verbose'] = 1 # show detailed messages \n\n\n# Recording \nsimConfig['recordCells'] = [] # whether to record cell traces or not\nsimConfig['recordTraces'] = 
{'Vsoma':{'sec':'soma','pos':0.5,'var':'v'}}\nsimConfig['recordStim'] = True # record spikes of cell stims\nsimConfig['recordStep'] = 0.1 # Step size in ms to save data (eg. V traces, LFP, etc)\n\n# Saving\nsimConfig['filename'] = 'mpiHHTut' # Set file output name\nsimConfig['saveFileStep'] = 1000 # step size in ms to save data to disk\nsimConfig['savePickle'] = False # Whether or not to write spikes etc. to a .mat file\nsimConfig['saveJson'] = False # Whether or not to write spikes etc. to a .mat file\nsimConfig['saveMat'] = False # Whether or not to write spikes etc. to a .mat file\nsimConfig['saveTxt'] = False # save spikes and conn to txt file\nsimConfig['saveDpk'] = False # save to a .dpk pickled file\n\n\n# Analysis and plotting \nsimConfig['plotRaster'] = True # Whether or not to plot a raster\nsimConfig['plotCells'] = [] # plot recorded traces for this list of cells\nsimConfig['plotLFPSpectrum'] = False # plot power spectral density\nsimConfig['maxspikestoplot'] = 3e8 # Maximum number of spikes to plot\nsimConfig['plotConn'] = False # whether to plot conn matrix\nsimConfig['plotWeightChanges'] = False # whether to plot weight changes (shown in conn matrix)\nsimConfig['plot3dArch'] = False # plot 3d architecture\n\n\n","sub_path":"examples/HHTut.py","file_name":"HHTut.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166758059","text":"'''\ntfrecord로 변환하기 위해 tf-slim 오픈소스를 사용하는데 이때\ndirectory형태는 \nlabel -- images . . . \nlabel -- images . . .\n .\n .\n . \n 되어있기 때문에 기존 directory의 구조를 바꿔준다. \n'''\n\nimport os\nimport cv2\n\ndef _main():\n ORIGINAL_IMAGE = 'C:\\\\Users\\\\iceba\\\\develop\\\\python\\\\naver_d2_fest_6th\\\\img\\\\dummy'\n DIVIDE_IMAGE = 'C:\\\\Users\\\\iceba\\\\develop\\\\python\\\\naver_d2_fest_6th\\\\img\\\\total'\n\n label_check_list = list()\n idx = 1\n\n for img in os.listdir(ORIGINAL_IMAGE):\n original_image = ORIGINAL_IMAGE + os.sep + img\n label = img.split('_')[0]\n if not label in label_check_list:\n print(str(idx)+' directory success')\n label_check_list.append(str(label))\n os.mkdir(DIVIDE_IMAGE+os.sep+str(idx))\n idx += 1\n # print(DIVIDE_IMAGE+os.sep+str(idx-1)+os.sep+img)\n copy_image = cv2.imread(original_image)\n cv2.imwrite(DIVIDE_IMAGE+os.sep+str(idx-1)+os.sep+img, copy_image)\n\nif __name__ == \"__main__\":\n _main()\n","sub_path":"src/utils/original_subdir_label.py","file_name":"original_subdir_label.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302329636","text":"# tr Plugin for ninjabot\n\nimport re\n\nlast_messages = {}\n\nclass Plugin:\n def __init__(self, controller, config):\n self.controller = controller\n\n def on_incoming(self, msg):\n # Ignore those who have been ignored...\n if self.controller.is_ignored(msg.nick):return\n\n if msg.type != msg.CHANNEL:return\n\n body = msg.body\n\n # check if the message matches the (y|tr)/blah/blah/ syntax\n matches = re.match(r'''(?x) # verbose mode\n ^(?:y|tr)(/) # starts with y/ or tr/\n (\n (?:\\\\\\1)* # any number of escaped /\n [^/]+ # at least 1 non-/\n (?:\\\\\\1[^/]*)* # an escaped / and any number of non-/, repeatedly\n )\n / # literal /\n ((?:\\\\\\1)*[^/]+(?:\\\\\\1[^/]*)*) # the above again\n /?$ # end with optional /\n ''', body)\n\n if matches:\n groups = matches.groups()\n # did they have a last message?\n if msg.nick in last_messages:\n last_message = last_messages[msg.nick]\n if 
len(groups) == 3:  # the regex yields three groups: the delimiter, the pattern, and the replacement\n                pattern, replacement = [s.replace('\\\\'+groups[0], groups[0]) for s in groups][1:]\n                if len(pattern) == len(replacement):\n                    body = last_message.translate(dict(zip(map(ord, pattern), replacement)))\n                    self.controller.privmsg(msg.channel, '{}: {}'.format(msg.nick, body))\n                else:\n                    # was invalid, return without adding this to their last messages\n                    return\n\n        # add it to the last messages dictionary\n        last_messages[msg.nick] = body\n","sub_path":"Plugins/tr.py","file_name":"tr.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"457993798","text":"def solution(name):\n    idx = len(name)\n    init = idx * 'A'\n\n    count = 0\n    index = 0\n    right = 1\n    left = 1\n\n    for i in range(len(name)):\n        if name[i] == init[i]:\n            continue\n        else:\n            count += min(ord(name[i]) - ord('A'), ord('Z') - ord(name[i]) + 1)\n        count += 1\n\n    for i in range(1, len(name)):\n        if name[index + i] == \"A\":\n            right += 1\n        else:\n            break\n    for i in range(1, len(name)):\n        if name[index - i] == \"A\":\n            left += 1\n        else:\n            break\n\n    if right > left:\n        count += left\n        index -= left\n    else:\n        count += right\n        index += right\n\n    return count - 2\n","sub_path":"hiseoung/PROGRAMERS/20210719_PROGRAMERS_조이스틱(level2).py","file_name":"20210719_PROGRAMERS_조이스틱(level2).py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"509330873","text":"# -*- coding: UTF-8 -*-\n\nfrom django.views.generic import View\nfrom django.conf import settings\nfrom django.http import HttpResponseRedirect\nfrom django.utils import translation\n\nfrom . import forms\n\n\nclass IndexView(View):\n    def get(self, request):\n        next = \"/\"\n        if \"next\" in request.GET:\n            next = request.GET.get(\"next\")\n\n        response = HttpResponseRedirect(next)\n\n        if not request.GET:\n            return response\n\n        form = forms.LanguageCodeForm(data=request.GET)\n        if not form.is_valid():\n            return response\n\n        language = form.cleaned_data['language']\n\n        if hasattr(request, \"session\"):\n            request.session[translation.LANGUAGE_SESSION_KEY] = language\n        else:\n            response.set_cookie(settings.LANGUAGE_COOKIE_NAME, language)\n        translation.activate(language)\n\n        return response\n","sub_path":"SprajoUnitech/venv/Lib/site-packages/django_languageselect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"142753","text":"import pandas as pd\r\nimport numpy as np\r\nfrom bokeh.layouts import row\r\nfrom shapely.geometry import Point\r\nfrom shapely.geometry.polygon import Polygon\r\nfrom bokeh.io import output_file, show\r\nfrom bokeh.models import (\r\n    GMapPlot, GMapOptions, ColumnDataSource, Circle, Range1d, PanTool, WheelZoomTool, BoxSelectTool\r\n)\r\n\r\ndata = pd.read_csv('C:/Users/Milica/PycharmProjects/test/output/data.csv')\r\n\r\nnyc_lat = 40.7127753\r\nnyc_lon = -74.0059728\r\n\r\nA_lat = 40.92984285194642\r\nA_lon = -73.93167114270909\r\nB_lat = 40.587572417681216\r\nB_lon = -73.64046478061937\r\nC_lat = 40.49641357209898\r\nC_lon = -74.25288391532376\r\n\r\nD_lat = 40.77073119523123\r\nD_lon = -74.09069824847393\r\n\r\npickup_longitudes = data['pickup_longitude']\r\npickup_latitudes = data['pickup_latitude']\r\ndropoff_longitudes = data['dropoff_longitude']\r\ndropoff_latitudes = data['dropoff_latitude']\r\n\r\nlons_vect = [A_lon, B_lon, C_lon]\r\nlats_vect = [A_lat, 
B_lat, C_lat]\r\nlons_lats_vect = np.column_stack((lons_vect, lats_vect)) # Reshape coordinates\r\npolygon = Polygon(lons_lats_vect) # create polygon\r\n\r\npickup_longitudes2 = []\r\npickup_latitudes2 = []\r\ndropoff_longitudes2 = []\r\ndropoff_latitudes2 = []\r\n\r\nfor lat, lon in zip(pickup_latitudes, pickup_longitudes):\r\n point = Point(lon, lat)\r\n if not polygon.contains(point):\r\n pickup_latitudes2.append(lat)\r\n pickup_longitudes2.append(lon)\r\n\r\nfor lat2, lon2 in zip(dropoff_latitudes, dropoff_longitudes):\r\n point = Point(lon2, lat2)\r\n if not polygon.contains(point):\r\n dropoff_latitudes2.append(lat2)\r\n dropoff_longitudes2.append(lon2)\r\n\r\nmap_options = GMapOptions(lat=40.7127753, lng=-74.0059728, map_type=\"roadmap\", zoom=8)\r\n\r\nplot = GMapPlot(x_range=Range1d(), y_range=Range1d(), map_options=map_options)\r\nplot1 = GMapPlot(x_range=Range1d(), y_range=Range1d(), map_options=map_options)\r\nplot.title.text = \"NYC Pickups\"\r\nplot1.title.text = \"NYC Dropoffs\"\r\n\r\nplot.api_key = \"AIzaSyB_NaKqSwUSD_DDSkReMz9fycq96ph9VhE\"\r\n\r\nsource = ColumnDataSource(\r\n data=dict(\r\n lat=pickup_latitudes2,\r\n lon=pickup_longitudes2,\r\n )\r\n)\r\ntriangle = ColumnDataSource(\r\n data=dict(\r\n lat=lats_vect,\r\n lon=lons_vect,\r\n )\r\n)\r\nsource2 = ColumnDataSource(\r\n data=dict(\r\n lat=dropoff_latitudes2,\r\n lon=dropoff_longitudes2,\r\n )\r\n)\r\n\r\ncircle_triangle = Circle(x=\"lon\", y=\"lat\", size=10, fill_color=\"black\", fill_alpha=0.8, line_color=None)\r\nplot.add_glyph(triangle, circle_triangle)\r\nplot1.add_glyph(triangle, circle_triangle)\r\n\r\ncircle = Circle(x=\"lon\", y=\"lat\", size=5, fill_color=\"blue\", fill_alpha=0.8, line_color=None)\r\nplot.add_glyph(source, circle)\r\n\r\ncircle2 = Circle(x=\"lon\", y=\"lat\", size=5, fill_color=\"red\", fill_alpha=0.8, line_color=None)\r\nplot1.add_glyph(source2, circle2)\r\n\r\nplot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())\r\nplot1.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())\r\noutput_file(\"pickUp_dropOff_modified_map.html\")\r\nshow(row(plot, plot1))\r\n","sub_path":"siap_git/visualization/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"167912162","text":"# Copyright (c) 2016 Qumulo, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport qumulo.lib.request as request\nfrom qumulo.lib.uri import UriBuilder\n\n@request.request\ndef replicate(conninfo, credentials, relationship):\n method = \"POST\"\n uri = \"/v1/replication/source-relationships/{}/replicate\".format(\n relationship)\n return request.rest_request(\n conninfo, credentials, method, unicode(uri))\n\n@request.request\ndef create_source_relationship(\n conninfo,\n credentials,\n target_path,\n address,\n source_id=None,\n source_path=None,\n source_root_read_only=None,\n map_local_ids_to_nfs_ids=None,\n continuous_replication_enabled=None,\n target_port=None):\n\n body = {\n 'target_root_path': target_path,\n 'target_address': address\n }\n\n if source_id is not None:\n body['source_root_id'] = source_id\n\n if source_path is not None:\n body['source_root_path'] = source_path\n\n if source_root_read_only is not None:\n body['source_root_read_only'] = source_root_read_only\n\n if target_port is not None:\n body['target_port'] = target_port\n\n if map_local_ids_to_nfs_ids is not None:\n body['map_local_ids_to_nfs_ids'] = map_local_ids_to_nfs_ids\n\n if continuous_replication_enabled is not None:\n body['continuous_replication_enabled'] = continuous_replication_enabled\n\n method = \"POST\"\n uri = \"/v1/replication/source-relationships/\"\n return request.rest_request(conninfo, credentials, method, uri, body=body)\n\n@request.request\ndef list_source_relationships(conninfo, credentials):\n method = \"GET\"\n uri = \"/v1/replication/source-relationships/\"\n return request.rest_request(conninfo, credentials, method, uri)\n\n@request.request\ndef get_source_relationship(conninfo, credentials, relationship_id):\n method = \"GET\"\n uri = \"/v1/replication/source-relationships/{}\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef delete_source_relationship(conninfo, credentials, relationship_id):\n method = \"DELETE\"\n uri = \"/v1/replication/source-relationships/{}\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef delete_target_relationship(conninfo, credentials, relationship_id):\n method = \"POST\"\n uri = \"/v1/replication/target-relationships/{}/delete\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef modify_source_relationship(\n conninfo, credentials,\n relationship_id,\n new_target_address=None,\n new_target_port=None,\n source_root_read_only=None,\n map_local_ids_to_nfs_ids=None,\n continuous_replication_enabled=None,\n blackout_windows=None,\n blackout_window_timezone=None,\n etag=None):\n\n method = \"PATCH\"\n uri = \"/v1/replication/source-relationships/{}\"\n\n body = {}\n if new_target_address is not None:\n body['target_address'] = new_target_address\n if new_target_port is not None:\n body['target_port'] = new_target_port\n if source_root_read_only is not None:\n body['source_root_read_only'] = source_root_read_only\n if map_local_ids_to_nfs_ids is not None:\n body['map_local_ids_to_nfs_ids'] = map_local_ids_to_nfs_ids\n if continuous_replication_enabled is not None:\n body['continuous_replication_enabled'] = continuous_replication_enabled\n if blackout_windows is not None:\n body['blackout_windows'] = blackout_windows\n if blackout_window_timezone is not None:\n body['blackout_window_timezone'] = blackout_window_timezone\n\n return 
request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id), body=body,\n if_match=etag)\n\n@request.request\ndef list_source_relationship_statuses(conninfo, credentials):\n method = \"GET\"\n uri = \"/v1/replication/source-relationships/status/\"\n return request.rest_request(conninfo, credentials, method, uri)\n\n@request.request\ndef list_target_relationship_statuses(conninfo, credentials):\n method = \"GET\"\n uri = \"/v1/replication/target-relationships/status/\"\n return request.rest_request(conninfo, credentials, method, uri)\n\n@request.request\ndef get_source_relationship_status(conninfo, credentials, relationship_id):\n method = \"GET\"\n uri = \"/v1/replication/source-relationships/{}/status\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef get_target_relationship_status(conninfo, credentials, relationship_id):\n method = \"GET\"\n uri = \"/v1/replication/target-relationships/{}/status\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef authorize(\n conninfo,\n credentials,\n relationship_id,\n allow_non_empty_directory=None,\n allow_fs_path_create=None):\n method = \"POST\"\n\n uri = UriBuilder(\n path=\"/v1/replication/target-relationships/{}/authorize\".format(\n relationship_id))\n\n if allow_non_empty_directory is not None:\n uri.add_query_param(\n \"allow-non-empty-directory\",\n \"true\" if allow_non_empty_directory else \"false\")\n if allow_fs_path_create is not None:\n uri.add_query_param(\n \"allow-fs-path-create\",\n \"true\" if allow_fs_path_create else \"false\")\n\n return request.rest_request(\n conninfo, credentials, method, unicode(uri))\n\n@request.request\ndef reconnect_target_relationship(conninfo, credentials, relationship_id):\n method = \"POST\"\n uri = \"/v1/replication/target-relationships/{}/reconnect\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef abort_replication(conninfo, credentials, relationship_id):\n method = \"POST\"\n uri = \"/v1/replication/source-relationships/{}/abort-replication\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef make_target_writable(conninfo, credentials, relationship_id):\n method = \"POST\"\n uri = \"/v1/replication/target-relationships/{}/make-writable\"\n return request.rest_request(\n conninfo, credentials, method, uri.format(relationship_id))\n\n@request.request\ndef reverse_target_relationship(\n conninfo, credentials,\n relationship_id,\n source_address,\n source_port=None):\n method = \"POST\"\n uri = \"/v1/replication/source-relationships/reverse-target-relationship\"\n\n body = {\n 'target_relationship_id': relationship_id,\n 'source_address': source_address\n }\n if source_port is not None:\n body['source_port'] = source_port\n\n return request.rest_request(conninfo, credentials, method, uri, body=body)\n","sub_path":"actions/lib/qumulo/rest/replication.py","file_name":"replication.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494549217","text":"import json\nimport logging\nimport os\n\nfrom flask import render_template, send_from_directory\nfrom flask_jwt_extended import jwt_required\n\nimport core.config.config\nimport core.config.paths\nimport core.filters\nimport core.flags\nfrom core import helpers\nfrom 
server import app\nfrom server.context import running_context\nfrom server.security import roles_accepted_for_resources\nfrom . import database\n\nlogger = logging.getLogger(__name__)\n\ndatabase.initialize_resource_roles_from_cleared_database()\n\n\n# Custom static data\n@app.route('/client/<path:filename>')\ndef client_app_folder(filename):\n    return send_from_directory(os.path.abspath(core.config.paths.client_path), filename)\n\n\n@app.route('/')\n@app.route('/playbook')\n@app.route('/scheduler')\n@app.route('/devices')\n@app.route('/triggers')\n@app.route('/cases')\n@app.route('/settings')\ndef default():\n    return render_template(\"index.html\")\n\n\n@app.route('/apps/<app_name>')\ndef app_page(app_name):\n    return render_template(\"index.html\")\n\n\n@app.route('/login')\ndef login_page():\n    return render_template(\"login.html\")\n\n\n@app.route('/availablesubscriptions', methods=['GET'])\n@jwt_required\n@roles_accepted_for_resources('cases')\ndef display_possible_subscriptions():\n    return json.dumps(core.config.config.possible_events)\n\n\n@app.route('/widgets', methods=['GET'])\n@jwt_required\n@roles_accepted_for_resources('apps')\ndef list_all_widgets():\n    return json.dumps({_app: helpers.list_widgets(_app) for _app in helpers.list_apps()})\n\n\ndef write_playbook_to_file(playbook_name):\n    \"\"\"Writes a playbook to file.\n\n    Args:\n        playbook_name (str): The name of the playbook to write to a file.\n    \"\"\"\n    playbook_filename = os.path.join(core.config.paths.workflows_path, '{0}.playbook'.format(playbook_name))\n    backup = None\n    try:\n        with open(playbook_filename) as original_file:\n            backup = original_file.read()\n        os.remove(playbook_filename)\n    except (IOError, OSError) as e:\n        logger.warning('Cannot read original playbook! Saving without backup! '\n                       'Reason: {}'.format(helpers.format_exception_message(e)))\n\n    app.logger.debug('Writing playbook {0} to file'.format(playbook_name))\n\n    try:\n        with open(playbook_filename, 'w') as workflow_out:\n            playbook_json = running_context.controller.get_playbook_representation(playbook_name)\n            workflow_out.write(json.dumps(playbook_json, sort_keys=True, indent=4, separators=(',', ': ')))\n    except Exception as e:\n        logger.error('Could not save playbook to file. Reverting file to original. '\n                     'Error: {0}'.format(helpers.format_exception_message(e)))\n        if backup is not None:\n            with open(playbook_filename, 'w') as f:\n                f.write(backup)\n","sub_path":"server/flaskserver.py","file_name":"flaskserver.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"65282672","text":"from datetime import datetime, timedelta\nimport sqlite3\nfrom common.ChannelUtil import ChannelUtil\nfrom discord.reaction import Reaction\nimport common.models.BaseModel as db\nfrom common.models.PermissionRole import PermissionRole\nfrom common.models.GamblingBet import GamblingBet\nfrom common.Util import Util\nfrom common.models.Channel import Channel\nfrom common.models.GamblingBet import GamblingBet\nfrom common.models.GamblingGame import GamblingGame\nfrom common.models.PermissionRole import PermissionRole\nfrom common.models.Member import Member\nfrom common.database.KfpMigrator import KfpMigrator\n\nfrom discord.guild import Guild, Role\nfrom common.models.Ranking import Ranking\nfrom peewee import SqliteDatabase\n\nMODULES = [Channel, GamblingBet, GamblingGame, Member, PermissionRole, Ranking]\n\nclass KfpDb():\n    # {guild:[channel, channel,...] ... 
}\n    __ignoreXpChannel = {}\n    __autoClearChannel = {}\n\n    def __init__(self, dbFile=r\"./common/KFP_bot.db\"):\n        self.sqliteDb = SqliteDatabase(dbFile)\n        KfpMigrator.KfpMigrate(self.sqliteDb)\n        db.proxy.initialize(self.sqliteDb)\n        self.sqliteDb.create_tables(MODULES)\n        self.__ignoreXpChannel = ChannelUtil.getXPIgnoredChannels()\n        self.__autoClearChannel = ChannelUtil.getAutoClearChannels()\n\n    # For test only, do not use\n    def teardown(self):\n        self.sqliteDb.drop_tables(MODULES)\n        self.sqliteDb.close()\n\n    # For test only, do not use\n    def get_database(self):\n        return self.sqliteDb\n\n    def has_member(self, member_id:int):\n        return Member.select().where(Member.member_id == member_id).exists()\n\n    # Fetch a member by member ID\n    def get_member(self, member_id:int) -> Member:\n        if self.has_member(member_id):\n            return Member.get_by_id(member_id)\n        return None\n    \n    # Add a new member\n    def add_member(self, member_id:int) -> Member:\n        member = Member.create(member_id=member_id)\n        member.save()\n        return member\n    \n    # Add several members at once\n    def add_members(self, member_ids):\n        data = []\n        for member_id in member_ids:\n            data.append({'member_id': member_id})\n        Member.insert_many(data).execute()\n\n    # Add experience points to a member\n    def increase_exp(self, guild_id:int, channel_id:int, member_id:int, new_exp:int):\n        if channel_id in self.__ignoreXpChannel.get(guild_id, []):\n            return \n        query = Member.select().where(Member.member_id == member_id)\n        if not query.exists():\n            return False\n        member = query.get()\n        member.exp = member.exp+new_exp\n        member.save()\n        return self.__update_rank_if_qualified(member_id)\n    \n    # Update a member's coin count; the amount can be negative. If the coins would drop below 0, the transaction counts as failed\n    def add_coin(self, member_id:int, amount:int):\n        query = Member.select().where(Member.member_id == member_id)\n        if not query.exists():\n            return False\n        member = query.get()\n        return self.update_member_coin(member, amount)\n    \n    def update_member_coin(self, member: Member, amount: int) -> bool:\n        newValue = member.coin + amount\n        if (newValue < 0):\n            return False\n        member.coin = newValue\n        member.save()\n        return True\n    \n    def add_token(self, member_id:int, amount:int):\n        query = Member.select().where(Member.member_id == member_id)\n        if query.exists():\n            member = query.get()\n        else:\n            member = self.add_member(member_id)\n        member.token += amount\n        member.save()\n    \n    # Promote the member's rank if they qualify\n    def __update_rank_if_qualified(self, member_id:int):\n        member = Member.get_by_id(member_id)\n        new_rank = member.rank\n        while (member.exp > Util.get_rank_exp(new_rank)):\n            new_rank += 1\n        if new_rank != member.rank:\n            member.rank = new_rank\n            member.save()\n        return member.rank\n\n    # Member's position in the experience ranking\n    def get_member_rank_order(self, member_id:int):\n        target_exp = Member.get_by_id(member_id).exp\n        return Member.select().where((Member.exp > target_exp)).count() + 1\n    \n    # Mark a channel as excluded from experience gains\n    def set_ignore_xp_channel(self, guild_id: int, channel_id: int):\n        ChannelUtil.setChannel(guild_id, channel_id, Util.ChannelType.IGNORE_XP, True)\n        self.__ignoreXpChannel = ChannelUtil.getXPIgnoredChannels()\n\n    # Remove a channel from the experience-excluded list\n    def remove_ignore_xp_channel(self, guild_id: int, channel_id: int):\n        ChannelUtil.removeChannel(guild_id, channel_id, Util.ChannelType.IGNORE_XP)\n        self.__ignoreXpChannel = ChannelUtil.getXPIgnoredChannels()\n\n    # Check whether this channel auto-deletes user messages\n    def is_channel_auto_clear(self, guild_id: int, channel_id: int) -> bool:\n        return channel_id in self.__autoClearChannel.get(guild_id, [])\n\n    # Get all auto-clear channels\n    def get_auto_clear_channels(self, guild_id: int):\n        return self.__autoClearChannel.get(guild_id, [])\n\n    # Get the list of admin permission roles\n    def load_permissions(self, role_type: Util.RoleType):\n        result = []\n        query = PermissionRole.select().where(PermissionRole.role_type == role_type)\n        if query.exists():\n            role: PermissionRole\n            for role in query.iterator():\n                result.append(role) \n        return result\n    \n    def increase_counting_table(self, user_id:int, key:str, guild_id:int):\n        now = datetime.today()\n        nowf = now.replace(hour=0,minute=0, second=0, microsecond=0).timestamp()\n        # end of today (next midnight); peewee expressions must be combined with &, not the 'and' keyword\n        nowe = (now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)).timestamp()\n        query = Ranking.select().where(Ranking.ranking_key == key, Ranking.user_id == user_id, (Ranking.timestamp >= nowf) & (Ranking.timestamp < nowe))\n        if not query.exists():\n            ranking = Ranking.insert(rankingt_type = Util.RankingType.REACTION,\n                ranking_key = key,\n                user_id = user_id,\n                guild_id = guild_id,\n                count = 1, \n                timestamp = now.timestamp()).execute()\n        else:\n            ranking = query.get()\n            ranking.count += 1\n            ranking.timestamp = now.timestamp()\n            ranking.save()\n\n    def reduce_counting_table(self, user_id:int, key:str, guild_id:int):\n        now = datetime.today()\n        nowf = now.replace(hour=0,minute=0, second=0, microsecond=0).timestamp()\n        nowe = (now.replace(hour=0, minute=0, second=0, microsecond=0) + timedelta(days=1)).timestamp()\n        query = Ranking.select().where(Ranking.ranking_key == key, Ranking.user_id == user_id, (Ranking.timestamp >= nowf) & (Ranking.timestamp < nowe))\n        if not query.exists():\n            ranking = Ranking.insert(rankingt_type = Util.RankingType.REACTION,\n                ranking_key = key,\n                user_id = user_id,\n                guild_id = guild_id,\n                count = -1, \n                timestamp = now.timestamp()).execute()\n        else:\n            ranking = query.get()\n            ranking.count -= 1\n            ranking.timestamp = now.timestamp()\n            ranking.save()\n    \n    def get_conting_table(self,guild_id:int, timestamp_from:float, timestamp_end:float):\n        assert timestamp_from != timestamp_end\n        result = list()\n        for row in Ranking.select().where(Ranking.rankingt_type == Util.RankingType.REACTION, Ranking.guild_id == guild_id, (Ranking.timestamp >= timestamp_from) & (Ranking.timestamp <= timestamp_end)):\n            result.append(row)\n        if result != []:\n            return result\n        else:\n            return None\n    \n    def counting_table_clean(self):\n        Ranking.update({Ranking.count:0}).execute()\n    \n    # Update an admin role's id; usually only needed after the bot was kicked and re-invited\n    def update_permission_role(self, old_id: int, new_id: int, guild_id: int, role_type: Util.RoleType):\n        query = PermissionRole.select().where(PermissionRole.role_type == role_type, PermissionRole.role_id == old_id)\n        if query.exists():\n            role = query.get()\n        else:\n            role = PermissionRole(role_type = role_type, guild_id = guild_id)\n        role.role_id = new_id\n        role.save()\n\n    # Reset everyone's 🍗 tokens\n    def reset_everyone_token(self):\n        Member.update({Member.token:100}).execute()\n    \n    def add_permission_role(self, guild: Guild, new_role: Role, role_type: Util.RoleType):\n        role = PermissionRole(role_type = role_type, guild_id = guild.id, role_id = new_role.id)\n        role.save()\n        return role\n    \n    def has_permission(self, guild_id: int, role_id: int, type: Util.RoleType) -> bool:\n        query = PermissionRole.select().where(PermissionRole.role_type == type, PermissionRole.guild_id == guild_id, PermissionRole.role_id == role_id)\n        return query.exists()\n    \n","sub_path":"python/bots/common/KFP_DB.py","file_name":"KFP_DB.py","file_ext":"py","file_size_in_byte":8909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"514400424","text":"#!/usr/bin/env python3\n\nimport json\nfrom pprint import pprint\n\ndef load_data():\n    with open(\"data/bookmarks.json\") as f:\n        data = json.loads(f.read())\n    return data\n\ndef validate_category(category):\n    if 
not \"category\" in category:\n print('Category does not have key \"category\"')\n pprint(category)\n exit(1)\n\n if not category[\"category\"]:\n print('Category \"category\" can not be false.')\n pprint(category)\n exit(1)\n\n if not \"slug\" in category:\n print('Category does not have key \"slug\"')\n pprint(category)\n exit(1)\n\n if not category[\"slug\"]:\n print('Category \"slug\" can not be false')\n pprint(category)\n exit(1)\n\n if not \"bookmarks\" in category:\n print('Category does not have key \"bookmarks\"')\n pprint(category)\n exit(1)\n\n if not category[\"bookmarks\"]:\n print('Category \"bookmarks\" can not be false')\n pprint(category)\n exit(1)\n\ndef validate_bookmark_file():\n try:\n data = load_data()\n\n if not isinstance(data, list):\n print(\"Root element is not a list.\")\n exit(1)\n\n for category in data:\n validate_category(category)\n except Exception as e:\n print(\"There was an exception when validating the bookmarks.\")\n print(e)\n exit(1)\n\nvalidate_bookmark_file()\n\nprint(\"Validation succeeded.\")\nexit(0)\n","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134988191","text":"#!/usr/bin/python3.6\n\n# markets.py\n# High-level class for Markets data loading and management\n__version__ = \"1.2.0\"\n\nimport os, sys, time\nimport asyncio\nimport ccxt as ccxt_s\nimport ccxt.async_support as ccxt\nfrom ccxt.async_support import Exchange\nfrom colorama import init, Fore, Back, Style # color printing\nfrom datetime import datetime\nimport logging\n# our imports\nfrom messages import Messages #Jobs, Results\n\nroot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root)\n#from settings.settings import Settings\n#from settings.settings import Settings\n#from database.database import Database\n\n\nclass Timer:\n def __init__(self):\n self.start = time.time()\n\n def tic(self):\n return \"%2.2f\" % (time.time() - self.start)\n\n\n\nclass Markets:\n\n fiat = ['USD','EUR','JPY','UAH','USDT','RUB','CAD','NZDT']\n allowed_tsyms = ['USD', 'USDT', 'BTC', 'ETH', 'DOGE', 'LTC', 'EUR', 'RUB'] # allowed symbols for convertion to\n\n def __init__(self): #! results\n self.exchanges = {} # exchanges - dict of ccxt objects\n self.exchanges_list = [] # exchanges_list - custom list of exchanges to filter (lowercase)\n self.ex_pairs = {} # ex_pairs - dict of exchanges which contains corresponding trading pairs\n self.my_tokens = [] # list of string values representing which token is allowed either on fsym or tsym\n self.last_fetch = {} # init dict with last fetches\n self._cache = []\n\n # self.results = results #! 
Messages(type=\"results\", \"emitter.ini\", exchange_name=\"history_results\", queue_name=\"history_results\")\n #self.db_context = db_context # database context\n #self._cache = db_context._cache # local cache for storing last access times to exchanges and pairs\n #self.config = Settings()\n #logging.basicConfig(filename=self.db_context.config.log_file, level=logging.INFO, format=u'%(filename)s:%(lineno)d %(levelname)-8s [%(asctime)s] %(message)s')\n init(convert=True) # colorama init \n print(f\"CCXT version: {Fore.GREEN+Style.BRIGHT+ccxt.__version__+Style.RESET_ALL}\")\n \n\n def _init_metadata(self, exchanges_list):\n \"\"\"\n used in _load_exchanges method to create list of ccxt market instances - self.exchanges[]\n\n :param exchanges_list: : list(str) - list of exchanges (lowercase strings)\n \"\"\" \n self.exchanges_list = exchanges_list\n for id in exchanges_list:\n exchange = getattr(ccxt, id)\n # this option enables the built-in rate limiter <<=============\n exchange.enableRateLimit = True, \n self.exchanges[id] = exchange() \n\n\n def __del__(self):\n if self.exchanges != {}:\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self._shutdown())\n\n\n async def _shutdown(self):\n \"\"\" Deletes all instances in self.exchanges list \"\"\"\n for ex in self.exchanges.items():\n print(\"Closing {}\".format(ex[0]))\n await ex[1].close()\n\n\n async def _load_exchange(self, exchange: object):\n \"\"\"\n Loads market metadata to exchange object, passed as parameter. Called from internal self._run_tasks() method.\n\n :param exchange: ccxt object instance\n \"\"\" \n #assert(self.exchanges != {}, \"exchanges are not loaded!\")\n try:\n await exchange.load_markets()\n print(\"{} market metadata loaded\".format(exchange.name))\n except Exception:\n print(f\"{exchange.name} {Fore.RED+Style.BRIGHT} market metadata NOT loaded{Style.RESET_ALL}\")\n\n\n async def _run_tasks(self, exchanges):\n \"\"\"\n Executes ccxt load_market() in parallel manner using asyncio.\n\n :param exchanges: - list(object) list of ccxt exchange objects. 
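In practice a dict is passed and iterated via .items(), so 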
ex[0] - string name of exchange, ex[1] - ccxt object\n \"\"\" \n #assert(self.exchanges != {}, \"exchanges are not loaded!\")\n tasks = []\n i = 0\n for ex in exchanges.items():\n # i += 1\n tasks.append(asyncio.ensure_future(self._load_exchange(ex[1])))\n # if i == 9:\n # i = 0\n # asyncio.sleep(3)\n #await asyncio.gather(*tasks)\n await asyncio.wait(tasks)\n\n \n def load_exchanges(self, exchanges_list):\n \"\"\"\n Loads exchanges metadata self.exchanges (list of ccxt objects)\n\n :param exchanges_list: - list(str) list of ids of exchanges (look up exchange id here: https://github.com/ccxt/ccxt)\n \"\"\"\n if self.exchanges_list == []:\n self._init_metadata(exchanges_list) # init by exchanges list from settings.exchanges table\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(self._run_tasks(self.exchanges))\n #loop.close()\n except KeyboardInterrupt:\n print(\"Leaving by Ctrl-C...\")\n sys.exit()\n except Exception:\n print(\"Exception in markets.py:126\")\n\n def reload_pairs(self, my_tokens):\n \"\"\"\n Returns dict of exchanges, corresponding pairs.\n\n :param my_tokens: list(str) - list of string values representing which token is allowed either on fsym or tsym\n self.exchanges - dict of ccxt objects\n self.exchanges_list - custom list of exchanges to filter (lowercase)\n \"\"\"\n self.my_tokens = my_tokens\n for ex in self.exchanges_list:\n my_pairs = [sym.split('/') for sym in self.exchanges[ex].symbols \n if sym.split('/')[0] in my_tokens and sym.split('/')[1] in my_tokens]\n my_pairs = [x[0]+'/'+x[1] for x in my_pairs]\n self.ex_pairs[ex] = my_pairs\n\n\n\n def fetch_trades(self, exchange: str, pair: str, since: int, limit: int):\n \"\"\" \n\n Fetches last N trades for given exchange and pair. SYNCHRONOUS version!\n\n :param exchange: string value of ccxt exchange id (look up exchange id here: https://github.com/ccxt/ccxt)\n :param pair: string value for exchange pair (for example \"BTC/USDT\")\n :param limit: integer value for how many rows to fetch from the exchange\n \"\"\"\n ex_obj = getattr(ccxt_s, exchange)\n ex = ex_obj()\n # if ex == None:\n # ex.load_markets() # --------- WHAT FOR ???\n\n ex.enableRateLimit = True,\n histories = []\n #params = {'from_id': from_id } # exchange-specific non-unified parameter name\n\n try:\n histories = ex.fetch_trades(symbol=pair, since=since, limit=limit)\n\n except Exception as e:\n print(f\"Error in {__file__}.fetch_trades(). {Fore.YELLOW}{e}{Fore.RESET}\")\n\n return histories\n\n\n async def _get_history(self, exchange, pairs):\n \"\"\" \n Get historic data for MANY pairs from ONE single exchange \n\n :param exchange: string value of ccxt exchange id. Ex: \"hitbtc2\" \n :param pairs: list of string values for pairs. Ex. 
[\"BTC/USDT\", \"ETH/BTC\", \"LTC/BTC\"]\n\n (look up for full exchanges IDs list here: https://github.com/ccxt/ccxt)\n \"\"\"\n pairs_txt = \", \".join(pairs)\n print(f\"\\t{Style.DIM}{datetime.now()} Requested {Fore.YELLOW}{exchange}{Style.RESET_ALL}: {Fore.BLUE}{pairs_txt}{Style.RESET_ALL}\")\n for pair in pairs:\n try:\n timer = Timer()\n #rateLimit = self.exchanges[exchange].rateLimit\n since = self._cache[exchange][pair]\n #since = None\n if since == None:\n histories = await self.exchanges[exchange].fetch_trades(pair, limit=50)\n else:\n histories = await self.exchanges[exchange].fetch_trades(pair, since=since, limit=50)\n\n fetched_rows = len(histories)\n #print(f\"\\t{Style.DIM}{datetime.now()} Received {Fore.YELLOW}{exchange}: {Fore.BLUE}{pair} {Fore.WHITE}({len(histories)} rows, {timer.tic()} seconds){Style.RESET_ALL}\")\n logging.info(f\"{exchange}, {pair} fetched {fetched_rows} rows in {timer.tic()} seconds\")\n\n if histories != []:\n ## SAVING LAST ACCESS TIME TO CACHE...\n timer2 = Timer()\n self._cache[exchange][pair] = histories[-1]['timestamp'] + 1\n \n ## SAVING history TO DATABASE...\n # batch_cql = []\n # for x in histories:\n # batch_cql.append(f\"INSERT INTO {self.config.data_keyspace}.{self.config.history_table} (exchange, pair, ts, id, price, amount, type, side, insert_date) VALUES \" +\n # f\"('{exchange}', '{pair}', {x['timestamp']}, '{x['id']}', {x['price']}, {x['amount']}, '{x['type']}', '{x['side']}', toTimestamp(now()) )\")\n # #print(batch_cql)\n # self.db_context.batch_execute(batch_cql)\n # logging.info(f\"{exchange}, {pair} saved {fetched_rows} rows in {timer2.tic()} seconds\")\n \n # SENDING history to RabbitMQ\n # results.send(message=histories)\n\n print(f\"{exchange}, {pair} processed {fetched_rows} rows in {timer2.tic()} seconds\")\n logging.info(f\"{exchange}, {pair} processed {fetched_rows} rows in {timer2.tic()} seconds\")\n \n #print(f\"\\t{Style.DIM}{datetime.now()} Saved {exchange}: {pair} ({len(histories)} rows, {timer.tic()} seconds){Style.RESET_ALL}\")\n print(f\"\\t{Style.DIM}{datetime.now()} Received and Saved {Fore.YELLOW}{exchange}: {Fore.BLUE}{pair} {Fore.WHITE}({len(histories)} rows, {timer.tic()} seconds){Style.RESET_ALL}\")\n\n except Exception as e:\n print(f\"Error in {__file__}._get_history(). {Fore.YELLOW}{e}{Fore.RESET}\")\n logging.error(e)\n\n #finally:\n # await asyncio.sleep(self.exchanges[exchange].rateLimit/1000)\n #sleep(rateLimit/1000)\n\n \n async def _get_histories(self, job: str):\n \"\"\" \n Fetches history in parallel manner \n \n \"\"\"\n try:\n exchanges = job.keys()\n tasks = []\n for exchange in exchanges:\n pairs = list(job[exchange][\"pairs\"])\n tasks.append(asyncio.ensure_future(self._get_history(exchange, pairs)))\n\n except Exception as e:\n print(f\"Error in {__file__}._get_histories(). 
{Fore.YELLOW}{e}{Fore.RESET}\")\n \n finally:\n await asyncio.gather(*tasks)\n\n\n def process_job(self, job: str, db_context):\n \"\"\"\n Method collects historic and orderbook data from exchanges \n and saves it to database given by context parameter db_context.\n Returns large chunk of collected data by each requested pair.\n\n :param db_context: reference to Database object\n :param job: must contain json dictionary with the follownig structure:\n\n Example of job parameter:\n {\n \"exchange1\": {\n \"ratelimit\": 3000,\n \"pairs\": [\"BTC/USD\", \"BTC/ETH\", \"ETH/USD\"],\n \"timestamp\": \"2018-07-27 13:05:31\",\n }\n \"exchange2_id\": {\n \"ratelimit\": 2000,\n \"pairs\": [\"BTC/OMG\", \"BTC/ETH\"],\n \"timestamp\": 1234567890,\n },...\n }\n \"\"\"\n try:\n timer = Timer()\n if self.exchanges_list == {}:\n raise ValueError(\"Markets instance is not properly initialized! load_exchanges() must be called first!\")\n \n logging.info(f\"Job {job['timestamp']} added\")\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self._get_histories(job['job']))\n print(f\"{Fore.MAGENTA+Style.BRIGHT}\\tJob completed in {Fore.WHITE}{timer.tic()}{Fore.MAGENTA} seconds{Style.RESET_ALL}\")\n \n logging.info(f\"Job {job['timestamp']} completed in {timer.tic()} seconds\")\n\n except Exception as e:\n init(convert=True) # colorama init \n print(f\"{Fore.RED}{e}{Fore.RESET}\")\n \n\nif __name__ == '__main__':\n print(\"This file is not intened for direct execution\")","sub_path":"RabbitMQ/markets/markets.py","file_name":"markets.py","file_ext":"py","file_size_in_byte":12388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"45087784","text":"import argparse\nimport os\nimport re\n\n\nplatform_file = os.path.join('Lib', 'platform.py')\nget_version_file = os.path.join('Python', 'getversion.c')\n\n\ndef patch_platform(msg):\n with open(platform_file, 'r') as fh:\n lines = list(fh)\n\n lines_it = iter(lines)\n with open(platform_file, 'w') as fh:\n for line in lines_it:\n fh.write(line)\n if line.startswith('_sys_version_parser'):\n next_line = next(lines_it)\n fh.write(\" r'([\\w.+]+)\\s*\" + '(?:' + re.escape(' ' + msg) + ')?' + \"\\s*'\\n\")\n\ndef patch_get_version(msg):\n with open(get_version_file, 'r') as fh:\n content = list(fh)\n\n lines = iter(content)\n with open(get_version_file, 'w') as fh:\n for line in lines:\n if line.strip().startswith('PyOS_snprintf(version, sizeof(version)'):\n fh.write(' PyOS_snprintf(version, sizeof(version),\\n')\n fh.write(' \"%.80s ' + msg.replace('\"', '\\\\\"') + ' (%.80s) %.80s\",\\n')\n else:\n fh.write(line)\n\n\nmsg = os.environ.get('python_branding', '')\nif msg == '':\n msg = \"| packaged by conda-forge |\" \n\npatch_platform(msg)\npatch_get_version(msg)\n","sub_path":"recipe/brand_python.py","file_name":"brand_python.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131093517","text":"import mechanize\nfrom BeautifulSoup import BeautifulSoup as Soup\nimport urllib\nimport re\nimport csv\n\n# This program visits and takes information from superherodb.com.\n# It takes the statistics of each superhero whose name begins with\n# the letter \"A\" and creates a .csv file of the resulting database. 
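Stats are read from divs whose class starts with \"gridbarvalue\".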
\n\ndef analyze(url):\n\n #Get tags\n strhtml = urllib.urlopen(url).read()\n soup = Soup(strhtml)\n name = soup.find(\"h1\").text\n divtags = soup.findAll(\"div\", {\"class\": re.compile(\"gridbarvalue.+\")})\n divtags = divtags[:6]\n \n #create CSV\n output = open(\"superman.csv\", \"wb\")\n writer = csv.writer(output)\n writer.writerows([[\"NAME\", \"INTELLIGENCE\", \"STRENGTH\", \"SPEED\", \"DURABILITY\", \"POWER\", \"COMBAT\"]])\n stats = [name]\n for divtag in divtags:\n stats.append(divtag.text)\n writer.writerows([stats])\n output.close()\n\n#analyze(\"http://www.superherodb.com/superman/10-791/\")\nurl = \"http://www.superherodb.com/characters\"\nhtmlstr = urllib.urlopen(url).read()\nsoup = Soup(htmlstr)\nlitags = soup.findAll(\"li\", {\"class\" : \"char-li\"})\n#print(litags)\nrows = [[\"NAME\", \"INTELLIGENCE\", \"STRENGTH\", \"SPEED\", \"DURABILITY\", \"POWER\", \"COMBAT\"]]\nfor litag in litags:\n href = litag.find('a').get('href')\n charurl = url + href\n heropage = urllib.urlopen(charurl).read()\n herosoup = Soup(heropage)\n name = herosoup.find(\"h1\").text\n if name[0] == \"B\":\n break\n name = [name]\n divtags = herosoup.findAll(\"div\", {\"class\" : re.compile(\"gridbarvalue.+\")})\n divtags = divtags[:6]\n stats = []\n empties = [\"\", \"\", \"\", \"\", \"\", \"\"]\n for divtag in divtags:\n stats.append(divtag.text)\n \n if len(stats) == 6:\n row = [name + stats]\n else:\n row = [name + empties]\n print(row)\n rows = rows + row\noutput = open(\"superheros.csv\", \"wb\")\nwriter = csv.writer(output)\nwriter.writerows(rows)\n","sub_path":"superhero.py","file_name":"superhero.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11488639","text":"import os\nfrom shutil import copyfile\n\nsplits = ['train', 'val', 'test']\n\nfor split in splits:\n image_dir = os.path.join(os.getcwd(), 'images')\n output_dir = os.path.join(os.getcwd(), 'data')\n\n classes = set()\n split_path = os.path.join(os.getcwd(), 'splits', 'ravi', split)\n with open('{}.csv'.format(split_path)) as split_file:\n for line in split_file:\n if line[0] == 'n':\n class_name = line[:-1].split(',')[1]\n classes.add(class_name)\n\n with open('{}.txt'.format(split_path), 'w') as class_file:\n for class_name in classes:\n class_dir = os.path.join(output_dir, class_name)\n if not os.path.exists(class_dir):\n os.makedirs(class_dir)\n class_file.write('{}\\n'.format(class_name)) \n\n for image_name in os.listdir(image_dir):\n class_name = image_name[:9]\n if class_name in classes:\n #im = cv2.imread(os.path.join(image_dir, image_name))\n #im_resized = cv2.resize(im, (84, 84), interpolation=cv2.INTER_AREA)\n #cv2.imwrite(os.path.join(split_dir, class_name, image_name), im_resized)\n copyfile(os.path.join(image_dir, image_name),\n os.path.join(output_dir, class_name, image_name))\n \n","sub_path":"data/miniImagenet/move_images.py","file_name":"move_images.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"141766710","text":"#!/usr/bin/env python\n\"\"\"\nCopyright 2014 Wordnik, Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an 
\"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nclass Account:\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\"\"\"\n\n\n def __init__(self):\n self.swaggerTypes = {\n 'id': 'int',\n 'type': 'str',\n 'level': 'str',\n 'password': 'str',\n 'email': 'str',\n 'partnerId': 'int',\n 'proServiceId': 'int',\n 'firstName': 'str',\n 'useTimeout': 'bool',\n 'timeoutMinutes': 'int',\n 'lastName': 'str',\n 'autoClassify': 'bool',\n 'emailSuccess': 'bool',\n 'emailFailure': 'bool',\n 'emailCampaign': 'bool',\n 'features': 'str',\n 'releaseDestinations': 'list[str]',\n 'plan': 'str',\n 'addons': 'int',\n 'pendingAddons': 'int',\n 'pendingPlan': 'str',\n 'renews': 'bool',\n 'featuresAsMap': 'object',\n 'addOnSourceConnectionQuota': 'int',\n 'addOnStorageQuota': 'int',\n 'brandingLogoUrl': 'str',\n 'brandingName': 'str',\n 'createdDate': 'date-time',\n 'expiresDate': 'date-time',\n 'partnerAccountId': 'str',\n 'paymentDate': 'date-time',\n 'pendingPlanSourceConnectionQuota': 'int',\n 'pendingPlanStorageQuota': 'int',\n 'priorClientIp': 'int',\n 'priorLoginDate': 'int',\n 'referralSourceConnectionQuota': 'int',\n 'referralStorageQuota': 'int',\n 'subscriptionSourceConnectionQuota': 'int',\n 'subscriptionStorageQuota': 'int',\n 'socialMediaUrl': 'str',\n 'totalStorageQuota': 'int',\n 'totalStorageUsage': 'int',\n 'totalSourceConnectionQuota': 'int',\n 'totalSourceConnectionUsage': 'int',\n 'uploadEmail': 'str',\n 'uploadUsername': 'str'\n\n }\n\n\n #Unique Id. Returned as a string. [RO]\n self.id = None # int\n #FileThis account type. [RO]\n self.type = None # str\n #FileThis account level. [RO]\n self.level = None # str\n #FileThis account password. [WO]\n self.password = None # str\n #User email (Also used for FileThis login).\n self.email = None # str\n #Unique Id of account's Content Delivery Partner. Returned as a string. [RO]\n self.partnerId = None # int\n #Unique Id of Pro-Service. [RO]\n self.proServiceId = None # int\n #User's first name\n self.firstName = None # str\n #Flag to enable/disable session timeout\n self.useTimeout = None # bool\n #Session timeout duration\n self.timeoutMinutes = None # int\n #User's last name\n self.lastName = None # str\n #Whether to automatically classify (tag) fetched documents, or not.\n self.autoClassify = None # bool\n #Send email when a fetch succeeds\n self.emailSuccess = None # bool\n #Send email when a fetch fails\n self.emailFailure = None # bool\n #Flag to subscribe/unsubscribe from marketing emails\n self.emailCampaign = None # bool\n #Internal FileThis feature flags. Base64-encoded. [RO]\n self.features = None # str\n self.releaseDestinations = None # list[str]\n #Subscription plan. [RO]\n self.plan = None # str\n #Number of 'add-on' packages purchased. [RO]\n self.addons = None # int\n #Number of 'add-on' packages upon renewal. [RO]\n self.pendingAddons = None # int\n #Subscription plan on renewal. [RO]\n self.pendingPlan = None # str\n #Does the account's subscription plan renew?. [RO]\n self.renews = None # bool\n self.featuresAsMap = None # object\n #Additional source connections from add-ons. [RO]\n self.addOnSourceConnectionQuota = None # int\n #Additional storage from add-ons in Kilobytes. [RO]\n self.addOnStorageQuota = None # int\n #Pro-Services branding logo. 
[RO]\n self.brandingLogoUrl = None # str\n #Pro-Services branding name. [RO]\n self.brandingName = None # str\n #Date created. [RO]\n self.createdDate = None # date-time\n #Date that the user's subscription plan expires. [RO]\n self.expiresDate = None # date-time\n #Unique Id given to account by Content Delivery Partner. [RO]\n self.partnerAccountId = None # str\n #Date payment made for subscription plan. [RO]\n self.paymentDate = None # date-time\n #Source connection quota for pending plan. [RO]\n self.pendingPlanSourceConnectionQuota = None # int\n #Storage quota for pending plan in Kilobytes. [RO]\n self.pendingPlanStorageQuota = None # int\n #IP address of user's previous login. [RO]\n self.priorClientIp = None # int\n #Date of user's previous login. [RO]\n self.priorLoginDate = None # int\n #Connection quota from referrals. [RO]\n self.referralSourceConnectionQuota = None # int\n #Storage quota from referrals in Kilobytes. [RO]\n self.referralStorageQuota = None # int\n #Connection quota from subscription. [RO]\n self.subscriptionSourceConnectionQuota = None # int\n #Storage quota from subscription in Kilobytes. [RO]\n self.subscriptionStorageQuota = None # int\n #Link for user to share FileThis. [RO]\n self.socialMediaUrl = None # str\n #Total storage quota for account in Kilobytes. [RO]\n self.totalStorageQuota = None # int\n #Total storage usage by account in Kilobytes. [RO]\n self.totalStorageUsage = None # int\n #Total source connection quota for account. [RO]\n self.totalSourceConnectionQuota = None # int\n #Total storage quota for account. [RO]\n self.totalSourceConnectionUsage = None # int\n #Upload email address [RO]\n self.uploadEmail = None # str\n #Upload user name\n self.uploadUsername = None # str\n \n","sub_path":"filethis/target-code/python/models/Account.py","file_name":"Account.py","file_ext":"py","file_size_in_byte":6666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"591215736","text":"import time\n\n# Create a fake list of a million email accounts\n\nmailaccounts = list()\nfor count in range(1000000):\n mailaccounts.append(f\"{count+1}@email.com\")\n\n# Using List Comprehension for the large size of email mailaccounts\nsend_gen = [print(f\"Email sent to {mailaccounts}. 
\") for mailaccounts in mailaccounts]\n# Sending fake emails with a half a second wait delay\ntime.sleep(0.5)\n","sub_path":"massemailer.py","file_name":"massemailer.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"260483179","text":"import random\n## 함수 선언부\ndef findMinIndex(ary) :\n minIndex = 0\n for i in range(1, len(ary)) :\n if ( ary[minIndex] > ary [i]) :\n minIndex = i\n return minIndex\n\n\n## 전역변수 \nbefore = [random.randint(33,190) for _ in range(20)]\nafter = []\n\n\n## 메인 코드\nprint('정렬 전 ->', before)\n\n# before의 개수만큼 반복\n# 가장 작은 위치를 알아내기\n# 가장 작은 값을 after에 넣은 후, before에서는 지우기 \n\nfor _ in range(len(before)):\n minPos = findMinIndex(before)\n after.append(before[minPos])\n del(before[minPos])\n \n\nprint('정렬 후 ->', after)","sub_path":"code11-02.py","file_name":"code11-02.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"465491617","text":"class Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n length = len(nums)\n max_sum = 0\n for x in range(length+1):\n max_sum += x\n true_sum = 0\n for x in nums:\n true_sum += x\n return max_sum-true_sum","sub_path":"missing-number/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"326676222","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\n# Author Tom Fyuri, Marc-André Gasser\n# 2017 for use with Raspberry PI using LED Panels\n# Simple bitcoin usd ticker you're free to modify or share following code however you like.\n# Version 0.0.6.1\n\nimport threading\nimport pygame\nimport httplib\nimport urllib\nimport json\nimport time\nimport sys, os\nimport copy\nfrom pygame.locals import *\n\n# setup some global stuff\npygame.init()\nscreen = pygame.display.set_mode((1280,720),pygame.FULLSCREEN)\npygame.mouse.set_visible(False)\npygame.display.set_caption('BTC-e Live')\nsurface = pygame.Surface(screen.get_size())\nsurface = surface.convert()\nfont = pygame.font.Font(None, 280)\nfont2 = pygame.font.Font(None, 280)\nfont3 = pygame.font.Font(None, 280)\nupdate_time = 1500\ntickrate = 50\n\nclass currency_data():\n pass\n\nticker_data = [[\"bitcoin\",\"BTC\",\"\",\" B\",currency_data(),0],\n [\"ethereum\",\"ETH\",\"\",\"L\",currency_data(),0],\n [\"iota\",\"IOTA\",\"\",\"L\",currency_data(),0]]\n \ndef quit():\n pygame.quit(); sys.exit()\n\ndef get_price(which): # \n try:\n conn = httplib.HTTPSConnection(\"api.coinmarketcap.com\", timeout=4)\n conn.request(\"GET\", \"/v1/ticker/\"+which+\"/\")\n response = conn.getresponse()\n j = json.load(response)\n conn.close()\n return j\n except StandardError:\n return None\n\ndef process_input():\n key = pygame.key.get_pressed()\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE: quit()\n \ndef update_data():\n # basic stuff\n for i in ticker_data:\n currency_name = i[0]\n i[5] = copy.deepcopy(i[4]) # save previous?\n pdata = i[5]\n data = i[4]\n data.name = i[1]\n data.nom = i[2]\n data.nom2 = i[3]\n json_data = get_price(currency_name)\n if json_data is None:\n data.error = True\n else:\n data.error = False\n data.last = str(round(float(json_data[0]['price_usd']),2)).rjust(8,\" \")\n\n if ((hasattr(data,'error')) and (hasattr(pdata,'error')) and (data.error == False) and 
(pdata.error == False)):\n if hasattr(data,'last_color'):\n data.last_color = pdata.last_color\n else:\n # WHITE originally 255, 255, 255 \n data.last_color = (242, 204, 133)\n if (data.last > pdata.last):\n # GREEN originally 0, 255, 0\n data.last_color = (49, 181, 49)\n elif (data.last < pdata.last):\n # RED originally 255, 0, 0\n data.last_color = (209, 29, 29)\n\ndef redraw():\n surface.fill((0, 0, 0))\n pos = 0; pos2 = 0;\n for i in ticker_data:\n data = i[4]\n if data.error:\n text = font.render(\"noInet\", 1, (44, 96, 209))\n #x = ist abstand zu erstem wert\n text_pos = text.get_rect(); text_pos.y = pos; text_pos.x = 400\n surface.blit(text, text_pos)\n else:\n if (hasattr(data,'last_color')):\n color = data.last_color\n else:\n color = (242, 204, 133)\n text = font2.render(\"{0}{1}\".format(data.last,data.nom), 1, color)\n\n text_pos = text.get_rect(); text_pos.y = pos2; text_pos.x = 400\n surface.blit(text, text_pos)\n\n # name\n text = font.render(data.name, 1, (44, 96, 209))\n text_pos = text.get_rect(); text_pos.y = pos; text_pos.x = 0\n surface.blit(text, text_pos)\n # zeilenabstand von den 3 zeilen\n pos+=200\n pos2+=200\n\n screen.blit(surface, (0, 0))\n pygame.display.flip()\n\ndef main():\n clock = pygame.time.Clock()\n update_delay = 0\n update_data()\n redraw()\n while True:\n process_input()\n update_delay = update_delay + tickrate\n if (update_delay >= update_time):\n update_delay = 0\n update_data()\n redraw()\n clock.tick(tickrate)\n\nif __name__ == '__main__': main()\n\n\n\n\n","sub_path":"cryptodisplay.py","file_name":"cryptodisplay.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"39563695","text":"import uuid\nfrom typing import List, Optional\n\nfrom .utils import logging\n\nlogger = logging.get_logger(__name__)\n\n\nclass Conversation:\n \"\"\"\n Utility class containing a conversation and its history. This class is meant to be used as an input to the\n :class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility function to manage the\n addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input\n before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created when\n the class is instantiated, or by calling :obj:`conversational_pipeline.append_response(\"input\")` after a\n conversation turn.\n Arguments:\n text (:obj:`str`, `optional`):\n The initial user input to start the conversation. If not provided, a user input needs to be provided\n manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can\n begin.\n conversation_id (:obj:`uuid.UUID`, `optional`):\n Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the\n conversation.\n past_user_inputs (:obj:`List[str]`, `optional`):\n Eventual past history of the conversation of the user. You don't need to pass it manually if you use the\n pipeline interactively but if you want to recreate history you need to set both :obj:`past_user_inputs` and\n :obj:`generated_responses` with equal length lists of strings\n generated_responses (:obj:`List[str]`, `optional`):\n Eventual past history of the conversation of the model. 
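Each entry is the reply generated for the user input at the same index. 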
You don't need to pass it manually if you use the\n pipeline interactively but if you want to recreate history you need to set both :obj:`past_user_inputs` and\n :obj:`generated_responses` with equal length lists of strings\n Usage::\n conversation = Conversation(\"Going to the movies tonight - any suggestions?\")\n # Steps usually performed by the model when generating a response:\n # 1. Mark the user input as processed (moved to the history)\n conversation.mark_processed()\n # 2. Append a mode response\n conversation.append_response(\"The Big lebowski.\")\n conversation.add_user_input(\"Is it good?\")\n \"\"\"\n \n def __init__(\n self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None\n ):\n if not conversation_id:\n conversation_id = uuid.uuid4()\n if past_user_inputs is None:\n past_user_inputs = []\n if generated_responses is None:\n generated_responses = []\n \n self.uuid: uuid.UUID = conversation_id\n self.past_user_inputs: List[str] = past_user_inputs\n self.generated_responses: List[str] = generated_responses\n self.new_user_input: Optional[str] = text\n \n def __eq__(self, other):\n if not isinstance(other, Conversation):\n return False\n if self.uuid == other.uuid:\n return True\n return (\n self.new_user_input == other.new_user_input\n and self.past_user_inputs == other.past_user_inputs\n and self.generated_responses == other.generated_responses\n )\n \n def add_user_input(self, text: str, overwrite: bool = False):\n \"\"\"\n Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`\n field.\n Args:\n text (:obj:`str`): The user input for the next conversation round.\n overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not existing and unprocessed user input should be overwritten when this function is called.\n \"\"\"\n if self.new_user_input:\n if overwrite:\n logger.warning(\n f'User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten '\n f'with: \"{text}\".'\n )\n self.new_user_input = text\n else:\n logger.warning(\n f'User input added while unprocessed input was existing: \"{self.new_user_input}\" new input '\n f'ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input'\n )\n else:\n self.new_user_input = text\n \n def mark_processed(self):\n \"\"\"\n Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and\n empties the :obj:`new_user_input` field.\n \"\"\"\n if self.new_user_input:\n self.past_user_inputs.append(self.new_user_input)\n self.new_user_input = None\n \n def append_response(self, response: str):\n \"\"\"\n Append a response to the list of generated responses.\n Args:\n response (:obj:`str`): The model generated response.\n \"\"\"\n self.generated_responses.append(response)\n \n def iter_texts(self):\n \"\"\"\n Iterates over all blobs of the conversation.\n Returns: Iterator of (is_user, text_chunk) in chronological order of the conversation. 
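Past inputs are paired with their responses before any pending user input is yielded. 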
``is_user`` is a\n        :obj:`bool`, ``text_chunks`` is a :obj:`str`.\n        \"\"\"\n        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):\n            yield True, user_input\n            yield False, generated_response\n        if self.new_user_input:\n            yield True, self.new_user_input\n    \n    def __repr__(self):\n        \"\"\"\n        Generates a string representation of the conversation.\n        Return:\n            :obj:`str`:\n            Example: Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user >> Going to the movies tonight - any\n            suggestions? bot >> The Big Lebowski\n        \"\"\"\n        output = f\"Conversation id: {self.uuid} \\n\"\n        for is_user, text in self.iter_texts():\n            name = \"user\" if is_user else \"bot\"\n            output += f\"{name} >> {text} \\n\"\n        return output\n","sub_path":"roberta/tokenizer/Conversation.py","file_name":"Conversation.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"326442561","text":"import numpy\nimport sys\nimport scipy.misc\nimport os.path\nimport tensorflow as tf\nimport argparse\n\nimport psnr\nimport ssim\n\n\ndef log10(x):\n\n    numerator = tf.log(x)\n\n    denominator = tf.log(tf.constant(10, dtype=numerator.dtype))\n\n    return numerator / denominator\n\ndef pppsnr(im1, im2):\n\n    img_arr1 = numpy.array(im1).astype('float32')\n\n    img_arr2 = numpy.array(im2).astype('float32')\n\n    mse = tf.reduce_mean(tf.squared_difference(img_arr1, img_arr2))\n\n    psnr = tf.constant(255**2, dtype=tf.float32)/mse\n\n    result = tf.constant(10, dtype=tf.float32)*log10(psnr)\n\n    with tf.Session():\n\n        result = result.eval()\n\n    return result\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='metrics arguments')\n    parser.add_argument('--a', type=str, default='refine',\n                        help='image_a')\n    parser.add_argument('--b', type=str, default='/home/opt603/lst/code/SRN-Deblur/testing_res',\n                        help='image_b')\n    args = parser.parse_args()\n    return args\n\nargs = parse_args()\nimgName = args.a\npred_imgName = args.b\nreal = scipy.misc.imread(imgName, flatten=True).astype(numpy.float32)\npred = scipy.misc.imread(pred_imgName, flatten=True).astype(numpy.float32)\n\nwidth, height = pred.shape[1], pred.shape[0]\n\nprint('Resolution %d x %d' % (width, height))\n\nssim_value = ssim.ssim_exact(real/255, pred/255)\npsnr_value = psnr.psnr(real, pred)\n\nprint('psnr:%.5f ssim:%.5f' % (psnr_value, ssim_value))\n","sub_path":"metrics/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"491645380","text":"'''\n\nOperator\tDescription\t Example\n==\tChecks whether the values of the two operands are equal; if so, the condition is True.\tWith a=3, b=3, (a == b) is True\n!=\tChecks whether the values of the two operands are equal; if they are not equal, the condition is True.\tWith a=1, b=3, (a != b) is True\n>\tChecks whether the value of the left operand is greater than the value of the right operand; if so, the condition holds.\tWith a=7, b=3, (a > b) is True\n<\tChecks whether the value of the left operand is less than the value of the right operand; if so, the condition holds.\tWith a=7, b=3, (a < b) is False\n>=\tChecks whether the value of the left operand is greater than or equal to the value of the right operand; if so, the condition holds.\tWith a=3, b=3, (a >= b) is True\n<=\tChecks whether the value of the left operand is less than or equal to the value of the right operand; if so, the condition holds.\tWith a=3, b=3, (a <= b) is True\n\n'''\n\na = 10\nb = 20\nprint(a==b)\n\n'''\n\nOperator\tLogical expression\tDescription\t Example\nand\t x and y\t Boolean \"and\": if x is False, x and y returns False; otherwise it returns the value of y.\t True and False returns False.\nor\t x or y\t Boolean \"or\": if x is True, it returns True; otherwise it returns the value of y.\t False or True returns True.\nnot\t not x\t Boolean \"not\": if x is True, it returns False; if x is False, it returns True.\t not True returns False, not False returns True\n\n'''\n\na = 10\nb = 20\n\nif a > 0 and b > 10:\n    print('correct')\nelse:\n    
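# runs when at least one of the comparisons above is False\n    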
print('wrong')\n\n\n","sub_path":"basis/比较及逻辑关系符.py","file_name":"比较及逻辑关系符.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"484050870","text":"from flask import g, session, request, url_for, flash, redirect\nfrom snipey import app, meetup_oauth, model, controller\n\n\n@app.before_request\ndef before_request():\n    \"\"\"\n    If a user_id is specified in the session, fetch the user from the db\n    \"\"\"\n    g.user = None\n    if 'user_id' in session:\n        g.user = model.User.query.get(session['user_id'])\n\n\n@meetup_oauth.tokengetter\ndef get_meetup_token():\n    \"\"\"\n    This is used by the API to look for the auth token and secret it\n    should use for API calls. During the authorization handshake a\n    temporary set of token and secret is used, but afterwards this\n    function has to return the token and secret. If you don't want to\n    store this in the database, consider putting it into the session\n    instead.\n    \"\"\"\n    oauth_secret = request.args.get('secret', '')\n    oauth_token = request.args.get('token', '')\n\n    if oauth_secret and oauth_token:\n        return oauth_token, oauth_secret\n\n    if g.user:\n        return g.user.token, g.user.secret\n\n\n@app.route('/login')\ndef login():\n    \"\"\"Calling into authorize will cause the OpenID auth machinery to kick\n    in. When all worked out as expected, the remote application will\n    redirect back to the callback URL provided.\n    \"\"\"\n    return meetup_oauth.authorize(callback=url_for('oauth_authorized',\n        next=request.args.get('next') or request.referrer or None))\n\n\n@app.route('/logout')\ndef logout():\n    session.pop('user_id', None)\n    flash('You were signed out', 'alert-success')\n    return redirect(request.referrer or url_for('index'))\n\n\n@app.route('/oauth-authorized')\n@meetup_oauth.authorized_handler\ndef oauth_authorized(resp):\n    if resp is None or resp['member_id'] is None:\n        flash(u'You denied the request to sign in.', 'alert-error')\n        return redirect(url_for('index'))\n\n    meetup_id = resp['member_id']\n    oauth_token = resp['oauth_token']\n    oauth_secret = resp['oauth_token_secret']\n\n    user = controller.fetch_user(meetup_id, (oauth_token, oauth_secret))\n\n    session['user_id'] = user.id\n    flash('You were signed in', 'alert-info')\n\n    return redirect(url_for('snipe'))\n","sub_path":"snipey/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"592604375","text":"# Task 1\ns = 'komenchukoleh'\nh = s[:2] + s[11:]\nprint(h)\n\na = 'my'\nprint((a[:] + a[:]))\n\nx = 'x'\nprint(x[1:])\n\n# Task 2\nuserNumber = input('Enter the phone number: ')\nif len(userNumber) == 10:\n    print('The number was entered correctly!')\nelse:\n    print('Invalid number input!')\n\n# Task 3\nimport random\n\ngamer_attempt = 0\n\ns = int(random.uniform(1, 10))\nwhile gamer_attempt < 6:\n\n    print('What number do you think it is?')\n    point = int(input('I think the number is: '))\n\n    gamer_attempt = gamer_attempt + 1\n\n    if point < s:\n        print('The number is higher!')\n\n    if point > s:\n        print('The number is lower!')\n\n    if point == s:\n        break\n\nif point == s:\n    gamer_attempt = str(gamer_attempt)\n    print('You guessed the number')\n\nif point != s:\n    s = str(s)\n    print('Too bad, you ran out of attempts')\n\n# Task 4\nuser_Name = 'oleh'\nuser_InputName = input('Hello, enter your name for identification: ')\ns = user_InputName.lower()\nif s == user_Name:\n    print(\"Login successful!\")\nelse:\n    print('Incorrect name entered')\n","sub_path":"lesson-3-homework.py","file_name":"lesson-3-homework.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"23622577","text":"# user_input = input(\"Please enter the string you want to print:\")\n# print(user_input)\n#\n#\n# # frozenset\n# # tuple of vowels\n# vowels = ('a', 'e', 'i', 'o', 'u')\n#\n# fSet = frozenset(vowels)\n# print('The frozen set is:', fSet)\n# print('The empty frozen set is:', frozenset())\n\nx = 10\ny = 5\nwhile x > y:\n    print (\"X is greater than %s\" %(y))\n    y += 1\n","sub_path":"Udemy/Network Automation Tool/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
{"seq_id":"501628584","text":"\"\"\"@summary: Groups the states that redistribute the damage taken around the wounded character.\"\"\"\n\nfrom Etats.Etat import Etat\nfrom Effets.EffetDegats import EffetDegats\nimport Zones\n\n\nclass EtatRedistribuerPer(Etat):\n    \"\"\"@summary: Class describing a state that redistributes part of the damage\n    taken around the damaged character\"\"\"\n\n    def __init__(self, nom, debDans, duree, pourcentage, cibles, tailleZone, lanceur=None, desc=\"\"):\n        \"\"\"@summary: Initializes the state.\n        @nom: the name of the state, also used as its identifier\n        @type: string\n        @debDans: the number of turn starts that must pass before the state activates.\n        @type: int\n        @duree: the number of turn starts after activation that must pass\n                before the state deactivates.\n        @type: int\n\n        @pourcentage: the percentage of the damage that will be redistributed\n        @type: int\n        @cibles: the list of targets that can be hit by the redistribution\n        @type: string, targets separated by '|'\n        @tailleZone: the radius of the redistribution circle\n        @type: int\n\n        @lanceur: the player who placed this state\n        @type: Personnage or None\n        @desc: the description of what the state does, for display.\n        @type: string\"\"\"\n        self.pourcentage = pourcentage\n        self.tailleZone = tailleZone\n        self.cibles = cibles\n        super().__init__(nom, debDans, duree, lanceur, desc)\n\n    def __deepcopy__(self, memo):\n        \"\"\"@summary: Duplicates a state (clone)\n        @return: The clone of the state\"\"\"\n        return EtatRedistribuerPer(self.nom, self.debuteDans, self.duree, self.pourcentage,\n                                   self.cibles, self.tailleZone, self.lanceur, self.desc)\n\n    def triggerApresSubirDegats(self, cibleAttaque, niveau, attaquant, totalPerdu):\n        \"\"\"@summary: A trigger called for every state of the attacked player\n                     when damage is about to be taken.\n        Redistributes part of the incoming damage within the defined zone.\n        @cibleAttaque: the player who is about to take the damage\n        @type: joueur\n        @niveau: The game grid\n        @type: Niveau\n        @totalPerdu: The total health the player is about to lose.\n        @type: int\n        @attaquant: The player who made the attack\n        @type: Personnage\"\"\"\n        totalPerdu = int(totalPerdu*(self.pourcentage/100.0))\n        effetRedistribution = EffetDegats(totalPerdu, totalPerdu, \"renvoie\",\n                                          zone=Zones.TypeZoneCercle(self.tailleZone),\n                                          bypassDmgCalc=True,\n                                          cibles_possibles=self.cibles, cibles_exclues=\"Lanceur\")\n        niveau.lancerEffet(effetRedistribution, cibleAttaque.posX, cibleAttaque.posY,\n                           \"Redistribution\", cibleAttaque.posX, 
cibleAttaque.posY)\n","sub_path":"Etats/EtatRedistribuerPer.py","file_name":"EtatRedistribuerPer.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"623284843","text":"from lettuce import *\nfrom nose.tools import assert_equal\n\nimport os\n\n#from observer.time import *\n\n@step(u'Given I run have system time \"([^\"]*)\"')\ndef given_i_run_have_system_time_group1(step, time):\n\tos.system('sudo hwclock --set --date=\"12/14/14 ' + str(time) + '\"')\n\tos.system('sudo hwclock --hctosys')\n\n@step(u'When I run the script \"([^\"]*)\"')\ndef when_i_run_the_script_group1(step, script):\n os.system('observer/' + script)\n\n@step(u'Then I get the following time results from \"([^\"]*)\":')\ndef then_i_get_the_following_time_results_from_group1(step, time):\n\tfile = open(\"observer/\" + time, \"r\")\n\tfor line in step.hashes:\n\t\tf_line = file.readline().splitlines()\n\t\tassert_equal(f_line, line.values())\n\tfile.close()\n\n","sub_path":"Observer/test/bdd/steps/observer_steps.py","file_name":"observer_steps.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"508399793","text":"import xadmin\nfrom .models import *\n\nclass CityInfoXadmin(object):\n list_display = ['name', 'add_time']\n\n\nclass OrgInfoXadmin(object):\n list_display = ['image', 'name', 'course_num', 'study_num', 'love_num', 'click_num', 'category', 'cityinfo']\n style_fields = {'detail': 'ueditor'}\n\n\nclass TeacherInfoXadmin(object):\n list_display = ['image', 'name', 'work_year', 'work_position', 'age', 'gender', 'love_num', 'click_num']\n\n\nxadmin.site.register(CityInfo,CityInfoXadmin)\nxadmin.site.register(OrgInfo,OrgInfoXadmin)\nxadmin.site.register(TeacherInfo,TeacherInfoXadmin)","sub_path":"FangEdu/apps/orgs/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106691591","text":"#coding=utf-8\nfrom pyAuto.util.httpInterface import HttpInterface\nfrom pyAuto.util.TimeStamp import TimeStamp\nfrom pyAuto.util.Md5 import Md5\nfrom pyAuto.util.logOutput import LogOutput\nimport urllib\nclass InterfaceTest2(object):\n def __init__(self):\n log=LogOutput()\n dtObject=TimeStamp()\n url=\"http://test.ishop-city.com/admin/getUserInfo.action\"\n timeStamp=dtObject.timeStamp()\n method=\"GET\"\n dataDict={\n \"mcode\":\"hfbh\",\n \"dt\":timeStamp,\n \"ver\":\"1.0\",\n \"method\":\"info\",\n \"url\":\"http://www.baidu.com\"\n }\n headers={\"Content-type\" :\"application/json\" }\n dictSortKey_req=sorted(dataDict.items(), key=lambda dataDict:dataDict[0])\n listSortKey_req=[]\n for i in dictSortKey_req:\n listSortKey_req.append(\"=\".join(i))\n md5Str=\"&\".join(listSortKey_req)+\"&key=526ca4b758aea23d95725280bdef812e\"\n log.basicLog(md5Str)\n md5=Md5()\n sign=md5.md5(md5Str)\n parameters={\n \"mcode\":urllib.quote(dataDict[\"mcode\"]),\n \"dt\":urllib.quote(dataDict[\"dt\"]),\n \"ver\":urllib.quote(dataDict[\"ver\"]),\n \"method\":urllib.quote(dataDict[\"method\"]),\n \"url\":urllib.quote(dataDict[\"url\"]),\n \"sign\":sign\n }\n log.basicLog(parameters)\n urlAbsolutely=url+\"?\"+urllib.urlencode(parameters)\n log.basicLog(urlAbsolutely)\n conn=HttpInterface(urlAbsolutely, method)\n log.basicLog(conn.getHost())\n log.basicLog(conn.getPath())\n log.basicLog(conn.getParametersUrlencodeDeal())\n data=conn.request()\n 
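# log the response body returned by the signed GET request\n        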
log.basicLog(data)\nif __name__==\"__main__\":\n test=InterfaceTest2()","sub_path":"InterfaceTest2.py","file_name":"InterfaceTest2.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308096839","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : exp.py\n@Time : 2021/05/28 17:08:14\n@Author : eur1ka \n@Version : 2.7\n@Contact : eur1ka@163.com\n'''\n\n# here put the import lib\n\nfrom pwn import *\nfrom LibcSearcher import *\nimport pwnlib\ndebug = 0\ncontext.log_level = 'debug'\ncontext.arch = 'amd64'\nif debug:\n sh = process('PicoCTF_2018_are_you_root')\n libc = ELF('/lib/x86_64-linux-gnu/libc.so.6')\nelse:\n IP = 'node3.buuoj.cn'\n port = 27964\n sh = remote(IP,port)\n libc = ELF('/home/eur1ka/Desktop/Pwn/libc_file/18-64-libc-2.27.so')\nelf = ELF('PicoCTF_2018_are_you_root')\n\ndef login(name):\n sh.recvuntil(\">\")\n content = 'login ' + name\n sh.sendline(content)\n\ndef reset():\n sh.recvuntil(\">\")\n sh.sendline(\"reset\")\n\ndef get_flag():\n sh.recvuntil(\">\")\n sh.sendline('get-flag')\n\npayload = 'a' * 8 + p32(0x5)\nlogin(payload)\nreset()\nlogin(\"eur1ka\")\nget_flag()\nsh.interactive()","sub_path":"2018_picoctf/are you root/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272827912","text":"#!/usr/bin/python3.4\n# -*- coding: UTF-8 -*-\n'''\nCreated on Feb 23, 2015\nhttp://en.wikipedia.org/wiki/Cross-site_scripting\nhttp://stackoverflow.com/questions/964459/how-to-remove-text-between-script-and-script-using-python\nhttp://stackoverflow.com/questions/590747/using-regular-expressions-to-parse-html-why-not\n'''\n# enable debugging\nimport cgitb\ncgitb.enable()\n\n\nimport getEntities\nimport commonVariables as comm\nfrom lxml.html import parse\n\n\n\ndef readHtmlPage(htmlurl, readedPage, ontologyData):\n try:\n sentences = set()\n root = parse(htmlurl).getroot()\n #if the root is null, the html is incorrectly formed\n if(root is not None):\n for element in root.iter(\"head\"):\n element.drop_tree()\n for element in root.iter(\"script\"):\n element.drop_tree()\n for element in root.iter(\"style\"):\n element.drop_tree()\n for element in root.iter(\"noscript\"):\n element.drop_tree()\n for element in root.iter(\"input\"):\n element.drop_tree()\n for element in root.iter(\"form\"):\n element.drop_tree()\n for element in root.iter(\"title\"):\n element.drop_tree()\n for element in root.iter(\"img\"):\n element.drop_tree()\n \n for element in root.iter(\"body\"):\n try:\n sentences.add(element.text_content())\n except:\n pass\n if(len(sentences) > 0): \n lsent = list(sentences)\n for lau in lsent:\n if(lau != \"\"):\n laused = comm.replaceToPunkts(lau)\n for s6ne in laused:\n getEntities.getEntities(htmlurl, s6ne.strip(), ontologyData)\n except:\n comm.printException(comm.pathToSaveParsingErrors, \"read_html.py \" + htmlurl)\n pass\n \n\n\n","sub_path":"in1PC/rdfgenerator/read_html.py","file_name":"read_html.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617124607","text":"from django.urls import path, re_path\n\nfrom . 
import views\n\nurlpatterns = [\n path('',views.index,name=\"\"),\n path('sendOTP',views.sendOTP,name=\"sendOTP\"),\n path('verifyemail',views.verifyemail,name=\"verifyemail\"),\n path('signup',views.signup,name=\"signup\"),\n path('login',views.login,name=\"login\"),\n path('logout',views.logout,name=\"logout\"),\n path('signupwithemail',views.signupwithemail,name=\"signupwithemail\")\n\n]","sub_path":"authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"212606929","text":"import os\nimport shutil\nimport zipfile\nimport re\n\ndef clean_roms():\n path = '/path/to/source'\n new_path = '/path/to/destination'\n ideal_tags = {\n '(4)':'NTSC',\n '(U)':'NTSC',\n '(5)':'NTSC',\n '(E)':'PAL', \n '(UK)':'PAL',\n '(UE)':'PAL'\n }\n verified = '[!]'\n for root, dirs, files in os.walk(path):\n #create new folder structure\n for d in dirs:\n system_path = new_path + d\n if not os.path.isdir(system_path):\n os.mkdir(system_path)\n for k, v in ideal_tags.items():\n tag_folder = system_path + '/' + v\n if not os.path.isdir(tag_folder):\n os.mkdir(tag_folder)\n\n for f in files:\n src = os.path.join(root, f)\n # ignore bad dumps\n if re.search(r'\\[(b.)\\]', f):\n pass\n # sort by tags\n else:\n if (verified in f):\n if (('(4)' in f) or ('(5)' in f) or ('(U)' in f)):\n parent = new_path + os.path.basename(os.path.dirname(src)) + '/NTSC'\n dest = os.path.join(parent, f)\n print(src + ' --> ' + dest)\n shutil.copy2(src, dest)\n elif (('(E)' in f) or ('(UK)' in f) or ('(UE)' in f)):\n parent = new_path + os.path.basename(os.path.dirname(src)) + '/PAL'\n dest = os.path.join(parent, f)\n print(src + ' --> ' + dest)\n shutil.copy2(src, dest)\n \n \nclean_roms()\n","sub_path":"rom-sort.py","file_name":"rom-sort.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"171217317","text":"#!/usr/bin/env python3\n\nimport unittest\n\nfrom lib.graph import Node, Edge, Graph\nfrom algo.graph import connectedComponent, \\\n breadthFirstSearch\n\n\nclass GraphAlgorithms(unittest.TestCase):\n\n def setUp(self):\n self.datapath = './data'\n\n def testConnectedComponent(self):\n g = Graph(self.datapath + '/1.dat')\n\n self.assertEqual(\n sorted([u.id for u in connectedComponent(g, g.nodes[0])]),\n sorted(['s', 'a', 'b', 'c', 'd', 'e', 't'])\n )\n\n def testBreadthFirstSearch(self):\n # g = Graph(self.datapath + '/1.dat')\n g = Graph()\n g.gen()\n g.render(directed=False)\n\n layers = breadthFirstSearch(g, g.nodes[0])\n\n for l in layers:\n print(str([u.id for u in l]))\n\n\ndef runTests():\n unittest.main()\n\n\nif __name__ == '__main__':\n runTests()\n","sub_path":"test/algo/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"495394534","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Shiyu Huang \n@contact: huangsy13@gmail.com\n@file: setup.py.py\n\"\"\"\n\nimport setuptools\nfrom os import path\nimport re\nfrom codecs import open\n\npackages_name = 'TART'\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, packages_name, '__init__.py'), encoding='utf-8') as f:\n version = re.search(r'__version__ = \\'(.*?)\\'', f.read()).group(1)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n 
name=packages_name,\n    version=version,\n    author=\"Shiyu Huang\",\n    author_email=\"huangsy13@gmail.com\",\n    description=packages_name+\" program\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    \n    license='MIT',\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n\n        # Indicate who your project is intended for\n        'Intended Audience :: Developers',\n        'Intended Audience :: Science/Research',\n        'Topic :: Scientific/Engineering :: Artificial Intelligence',\n\n        # Pick your license as you wish (should match \"license\" above)\n        'License :: OSI Approved :: MIT License',\n\n        # Specify the Python versions you support here. In particular, ensure\n        # that you indicate whether you support Python 2, Python 3 or both.\n        # 'Programming Language :: Python :: 2',\n        # 'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3'\n    ],\n\n    keywords=packages_name,\n\n    packages=setuptools.find_packages(exclude=['examples']),\n\n    install_requires=['numpy',\n                      'six'],\n) \n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"333758528","text":"def _new_cert(self):\n    '\\n    Create a new certificate based on the csr.\\n    Return the certificate object as dict\\n    https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-6.5\\n    '\n    openssl_csr_cmd = [self._openssl_bin, 'req', '-in', self.csr, '-outform', 'DER']\n    (_, out, _) = self.module.run_command(openssl_csr_cmd, check_rc=True)\n    new_cert = {\n        'resource': 'new-cert',\n        'csr': nopad_b64(to_bytes(out)),\n    }\n    (result, info) = self.account.send_signed_request(self.directory['new-cert'], new_cert)\n    chain = []\n    if ('link' in info):\n        link = info['link']\n        parsed_link = re.match('<(.+)>;rel=\"(\\\\w+)\"', link)\n        if (parsed_link and (parsed_link.group(2) == 'up')):\n            chain_link = parsed_link.group(1)\n            (chain_result, chain_info) = fetch_url(self.module, chain_link, method='GET')\n            if (chain_info['status'] in [200, 201]):\n                chain = [chain_result.read()]\n    if (info['status'] not in [200, 201]):\n        self.module.fail_json(msg='Error new cert: CODE: {0} RESULT: {1}'.format(info['status'], result))\n    else:\n        return {\n            'cert': result,\n            'uri': info['location'],\n            'chain': chain,\n        }","sub_path":"Data Set/bug-fixing-4/80361ce4da747547fac3b6ab823c900479117257-<_new_cert>-fix.py","file_name":"80361ce4da747547fac3b6ab823c900479117257-<_new_cert>-fix.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"567912064","text":"# -*- coding: utf-8 -*-\n\nfrom tkinter import ttk\nfrom tkinter import *\nfrom PIL import Image\nfrom PIL import ImageTk\n\nimport sqlite3\n\n\n\nclass Chamado:\n\n    nome_banco = 'chamados.db'\n\n\n    def __init__(self, window):\n\n\n        # create the database\n        self.criar_banco()\n\n        # initialize the window\n        self.tela = window\n        self.tela.title('REGISTRO DE CHAMADOS')\n\n\n        # create a frame\n        frame = LabelFrame(self.tela, text ='REGISTRO DE CHAMADOS', fg= 'pink')\n        frame.grid(row = 0, column = 0, columnspan = 3, pady = 40) # padding: on all sides\n\n        img = ImageTk.PhotoImage(Image.open('download.jpeg'))\n        panel = ttk.Label(frame, image=img).grid(row = 1, column = 1)\n\n        # title field\n        Label(frame, text = 'Título: ').grid(row = 1, column = 0)\n        self.titulo = Entry(frame)\n        self.titulo.focus()\n        self.titulo.grid(row = 1, column = 1)\n\n        # description field\n        Label(frame, text = 'Descrição: ').grid(row = 2, column = 0)\n        self.descricao = Entry(frame)\n        #self.descricao.focus()\n        self.descricao.grid(row = 2, column = 1)\n\n\n        # add-ticket button\n        ttk.Button(frame, text = 'Registrar Chamado', command = self.adicionar_chamado).grid(row = 3, columnspan = 2, sticky =W + E)\n        # text placement inside the button: N, E, S, W, NE, NW, SE, and SW\n\n        # feedback message shown on the screen\n        self.mensagem = Label(text ='', fg ='red') # foreground (text) color\n        self.mensagem.grid(row = 3, column = 0, columnspan = 2, sticky =W + E)\n\n        # data table\n        self.tabela = ttk.Treeview(height = 20, columns = 2)\n        self.tabela.grid(row = 4, column = 0, columnspan = 2)\n        self.tabela.heading('#0', text ='Título', anchor = CENTER)\n        self.tabela.column(\"#0\", minwidth=0, width=400, stretch=NO)\n        self.tabela.heading('#1', text ='Descrição', anchor = CENTER)\n        self.tabela.column(\"#1\", minwidth=0, width=400, stretch=NO)\n\n\n        # table buttons\n        ttk.Button(text = 'Remover', command = self.remover_chamado).grid(row = 5, column = 0, sticky =W + E)\n        ttk.Button(text = 'Editar', command = self.editar_chamado).grid(row = 5, column = 1, sticky =W + E)\n\n        # list the tickets\n        self.listar_chamados()\n\n    # create the database\n    def criar_banco(self):\n        conn = sqlite3.connect('chamados.db')\n        cursor = conn.cursor()\n        cursor.execute(\"CREATE TABLE IF NOT EXISTS `chamado` (cham_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, titulo TEXT, descricao TEXT)\")\n\n    # execute a command\n    def execucao_comando(self, query, parameters = ()):\n        with sqlite3.connect(self.nome_banco) as conn:\n            cursor = conn.cursor()\n            result = cursor.execute(query, parameters)\n            conn.commit()\n        return result\n\n    # list the tickets\n    def listar_chamados(self):\n        # cleaning Table\n        dados = self.tabela.get_children()\n        for elemento in dados:\n            self.tabela.delete(elemento)\n        # getting data\n        comando_busca = 'SELECT titulo, descricao FROM chamado ORDER BY titulo DESC'\n        elementos_retornados = self.execucao_comando(comando_busca)\n        # filling data\n        for elemento in elementos_retornados:\n            self.tabela.insert(\"\" , 0, text=elemento[0], values=(elemento[1],))\n\n    # validation\n    def validar(self):\n        tamTitulo = len(self.titulo.get())\n        tamDesc = len(self.descricao.get())\n        return tamTitulo > 9 and tamDesc > 14\n\n    def adicionar_chamado(self):\n        if self.validar():\n            comando_insercao = 'INSERT INTO chamado VALUES(NULL, ?, ?)'\n            parametros = (self.titulo.get(), self.descricao.get())\n            self.execucao_comando(comando_insercao, parametros)\n            self.mensagem['text'] = 'Chamado {} adicionado com sucesso'.format(self.titulo.get())\n            self.titulo.delete(0, END)\n            self.descricao.delete(0, END)\n        else:\n            self.mensagem['text'] = 'Todos os dados são necessários'\n        self.listar_chamados()\n\n    def remover_chamado(self):\n        self.mensagem['text'] = ''\n        try:\n            self.tabela.item(self.tabela.selection())['text'][0]\n        except IndexError as e:\n            self.mensagem['text'] = 'Selecione um chamado'\n            return\n        self.mensagem['text'] = ''\n        name = self.tabela.item(self.tabela.selection())['text']\n        query = 'DELETE FROM chamado WHERE titulo = ?'\n        self.execucao_comando(query, (name,))\n        self.mensagem['text'] = 'Chamado {} removido com sucesso'.format(name)\n        self.listar_chamados()\n\n    def editar_chamado(self):\n        self.mensagem['text'] = ''\n        try:\n            self.tabela.item(self.tabela.selection())['values'][0]\n        except IndexError as e:\n            self.mensagem['text'] = 'Selecione um chamado'\n            return\n        titulo_antigo = self.tabela.item(self.tabela.selection())['text']\n        descricao_antiga = self.tabela.item(self.tabela.selection())['values'][0]\n        self.tela_edicao = Toplevel()\n        self.tela_edicao.title('Editar chamado')\n        # old title\n        Label(self.tela_edicao, text ='Título antigo:').grid(row = 0, column = 1)\n        Entry(self.tela_edicao, textvariable = StringVar(self.tela_edicao,\n            value = titulo_antigo), state ='readonly').grid(row = 0, column = 2)\n        # new title\n        Label(self.tela_edicao, text ='Título novo:').grid(row = 1, column = 1)\n        titulo_novo = Entry(self.tela_edicao)\n        titulo_novo.grid(row = 1, column = 2)\n\n        # old description\n        Label(self.tela_edicao, text ='Descrição antiga:').grid(row = 2, column = 1)\n        Entry(self.tela_edicao, textvariable = StringVar(self.tela_edicao,\n            value = descricao_antiga), state ='readonly').grid(row = 2, column = 2)\n        # new description\n        Label(self.tela_edicao, text ='Descrição nova:').grid(row = 3, column = 1)\n        descricao_nova= Entry(self.tela_edicao)\n        descricao_nova.grid(row = 3, column = 2)\n\n        Button(self.tela_edicao, text ='Atualizar',\n            command = lambda: self.editar_chamado_banco(titulo_novo.get(),\n            titulo_antigo, descricao_nova.get())).grid(row = 4, column = 2, sticky = W)\n        self.tela_edicao.mainloop()\n\n    def editar_chamado_banco(self, novo_titulo, titulo, nova_descricao):\n        comando_atualizacao = 'UPDATE chamado SET titulo = ?, descricao = ? WHERE titulo = ?'\n        parametros = (novo_titulo, nova_descricao, titulo)\n        self.execucao_comando(comando_atualizacao, parametros)\n        self.tela_edicao.destroy()\n        self.mensagem['text'] = 'Chamado{} atualizado com sucesso'.format(titulo)\n        self.listar_chamados()\n\nif __name__ == '__main__':\n    tela = Tk()\n    aplicacao = Chamado(tela)\n    tela.mainloop()","sub_path":"gui/tela_cadastro_chamado.py","file_name":"tela_cadastro_chamado.py","file_ext":"py","file_size_in_byte":6979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"510717093","text":"import app.globals\nimport sys\n\ndef invertedIndex(sfname):\n    client = app.globals.client\n    db = app.globals.db\n    iidx = app.globals.iidx\n    with open('uploads/' + sfname, 'r', errors='ignore') as f:\n        line = f.readline()\n        c = 0\n        while line:\n            temp = line.strip()\n            if c == 0:\n                j = {\n                    'filename':sfname,\n                    'Line0':temp\n                }\n                db.docs.insert_one(j)\n            else:\n                j = {\n                    'Line{}'.format(c): temp\n                }\n                db.docs.update_one({'filename':sfname}, {\"$set\": j})\n\n            for word in temp.strip(' .').split():\n                word = word.lower()\n                if word in iidx:\n                    iidx[word].append((sfname, c))\n                else:\n                    iidx[word] = [(sfname, c)]\n            line = f.readline()\n            c += 1\n    for k,v in iidx.items():\n        continue\n        #print(str(k) + \" : \" + str(v), file=sys.stderr)\n\ndef search(term):\n    client = app.globals.client\n    db = app.globals.db\n    iidx = app.globals.iidx\n    res = app.globals.res\n    res = []\n    if term not in iidx:\n        return []\n    else:\n        print(\"OCCURRENCES: \" + str(iidx[term]))\n        for occ in iidx[term]:\n            for d in db.docs.find({'filename':occ[0]}):\n                #print(d, file=sys.stderr)\n                res.append( ( occ[0], str(occ[1]), d['Line'+str(occ[1])]) )\n        print(\"res: \" + str(res), file=sys.stderr)\n        return res\n","sub_path":"app/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"11731554","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render,redirect, get_object_or_404\nfrom pyshorteners import Shortener\nfrom django.conf import settings\nfrom django.http import 
HttpResponse, HttpResponseRedirect, Http404,JsonResponse\nimport random, string, json\nfrom .models import Check\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db import connection\nimport logging\nlogger = logging.getLogger(__name__)\nfrom hashids import Hashids\nfrom django.core import serializers\n\n\n# def short_url_way(request):\n# #import pdb;pdb.set_trace()\n# url = request.POST.get('url', '')\n# if not url == '' and not ' ' in url:\n# try:\n# obj = Check.objects.get(http_url=url)\n# hashid = obj.short_id\n# except Exception as e:\n# obj = Check.objects.create(http_url=url)\n# hashids = Hashids(min_length=4)\n# hashid = hashids.encode(obj.id)\n# obj.short_id = hashid\n# obj.save()\n# url_link = {'link': settings.SITE_URL + '/r/' + hashid, 'id':obj.id}\n# return HttpResponse(json.dumps(url_link), content_type=\"application/json\")\n# return HttpResponse(json.dumps({'error':\"error occured\"}), content_type=\"application/json\")\n\n\ndef page(request):\n return render(request, 'page.html')\n\n\ndef short_url_way(request):\n url = request.POST.get('url', '')\n if not url == '' and not ' ' in url:\n if 'http' not in url:\n url= 'http://' + url\n obj, created = Check.objects.get_or_create(http_url=url)\n else:\n obj, created = Check.objects.get_or_create(http_url=url)\n hashid = obj.short_id\n new_hash = 0\n if created:\n new_hash = 1\n hashids = Hashids(min_length=7)\n hashid = hashids.encode(obj.id)\n obj.short_id = hashid\n obj.save()\n url_link = {'status_code': 200, 'link': settings.SITE_URL + '/' + hashid, 'long_url': url, \"hash\": hashid, \"new_hash\":new_hash }\n return HttpResponse(json.dumps(url_link), content_type=\"application/json\")\n return HttpResponse(json.dumps({'status_code': 500, 'status_txt': \"INVALID_ARG_URL\"}), content_type=\"application/json\")\n\n\ndef redirect_original(request, slug):\n try:\n url = get_object_or_404(Check, short_id=slug)\n return redirect(url.http_url)\n except Exception as msg:\n raise msg\n\n\ndef url_method():\n import csv\n csvopenfile = open('/home/nishantagarwal/hdfc_falsification_sheet.csv', 'rb')\n reader = csv.reader(csvopenfile)\n file_url = [row[0] for row in reader]\n csvwritefile = open('/home/nishantagarwal/hdfc_falsification_sheet.csv', 'wb')\n writefile = csv.writer(csvwritefile)\n # import pdb;pdb.set_trace()\n check_params = Check.objects.filter(http_url__in=file_url).values(\"http_url\", 'short_id')\n for url in check_params:\n url_link = settings.SITE_URL + '/' + url['short_id']\n writefile.writerow([url['http_url'], url_link])\n exist_url = Check.objects.filter(http_url__in=file_url).values_list(\"http_url\", flat=True)\n new_url_list = list(set(file_url) - set(exist_url))\n try:\n last_id = Check.objects.all().last().id + 1\n except AttributeError:\n last_id = 1\n\n check_objs = []\n for url in new_url_list:\n if 'http://' not in url:\n http_url = 'http://' + url\n hashids = Hashids(min_length=6)\n hashid = hashids.encode(last_id)\n last_id += 1\n url_link = settings.SITE_URL + '/' + hashid\n writefile.writerow([url, url_link])\n check_objs.append(Check(http_url=http_url, short_id=hashid))\n Check.objects.bulk_create(check_objs)\n\n\ndef csv_method():\n import csv\n csvopenfile = open('/home/nishantagarwal/hdfc_falsification_sheet.csv', 'rb')\n reader = csv.reader(csvopenfile)\n # long_url = []\n # for row in reader:\n # long_url.append(row[0])\n long_url = [row[0] for row in reader]\n csvwritefile = open('/home/nishantagarwal/hdfc_falsification_sheet.csv', 'wb')\n writefile = 
csv.writer(csvwritefile)\n check_params = Check.objects.filter(http_url__in=long_url).values(\"http_url\", 'short_id')\n\n for url in check_params:\n url_link = settings.SITE_URL + '/' + url['short_id']\n\n writefile.writerow([url['http_url'], url_link])\n\n http_url_list = []\n for url in long_url:\n if not url == '' and not ' ' in url:\n if 'http' not in url:\n url = 'http://' + url\n http_url_list.append(url)\n\n exist_url = Check.objects.filter(http_url__in=http_url_list).values_list(\"http_url\", flat=True)\n new_url_list = list(set(long_url)-set(exist_url))\n\n last_id = Check.objects.all().last().id + 1\n check_objs = []\n for http_url in new_url_list:\n hashids = Hashids(min_length=6)\n hashid = hashids.encode(last_id)\n last_id += 1\n url_link = settings.SITE_URL + '/' + hashid\n writefile.writerow([http_url, url_link])\n check_objs.append(Check(http_url=http_url, short_id=hashid))\n Check.objects.bulk_create(*check_objs)\n csvopenfile.close()\n csvwritefile.close()\n\n for url in long_url:\n if not url == '' and not ' ' in url:\n if 'http' not in url:\n http_url = 'http://' + url\n obj, created = Check.objects.get_or_create(http_url=http_url)\n else:\n obj, created = Check.objects.get_or_create(http_url=url)\n hashid = obj.short_id\n if created:\n hashids = Hashids(min_length=6)\n hashid = hashids.encode(obj.id)\n obj.short_id = hashid\n obj.save()\n url_link = settings.SITE_URL + '/' + hashid\n\n writefile.writerow([url, url_link])\n csvopenfile.close()\n csvwritefile.close()\n\n\ndef home(request):\n if request.method == 'POST':\n access_token = settings.ACCESS_TOKEN_BITLY\n http_url = request.POST['long-url']\n shortener = Shortener('Bitly', bitly_token=access_token)\n try:\n short_url = shortener.short(http_url)\n except Exception as e:\n msg = e\n return render(request, 'home.html', {'msg': msg})\n\n return render(request, 'short_url.html', {'short_url': short_url})\n return render(request, 'home.html')\n\n\ndef index_page(request):\n return render(request, 'index.html')\n\n\n\n# @csrf_exempt\n# def shorten_url(request):\n# url = request.POST.get(\"url\", '')\n# if not (url == ''):\n# short_id = get_short_code()\n# check = Check(http_url=url, short_id=short_id)\n# check.save()\n# response_data = {}\n# response_data['url'] = settings.SITE_URL + \"/\" + short_id\n# return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n# return HttpResponse(json.dumps({\"error\": \"error occurs\"}), content_type=\"application/json\")\n\n\ndef get_short_code():\n length = 6\n char = string.ascii_uppercase + string.digits + string.ascii_lowercase\n # if the randomly generated short_id is used then generate next\n while True:\n short_id = ''.join(random.choice(char) for x in range(length))\n try:\n temp = Check.objects.get(pk=short_id)\n except:\n return short_id\n\n\n@csrf_exempt\ndef shorten_url(request):\n url = request.POST.get(\"url\", '')\n if not url == '' and not ' ' in url:\n try:\n check = Check.objects.get(http_url=url)\n short_id = check.short_id\n except Exception as e:\n short_id = get_short_code()\n check = Check(http_url=url, short_id=short_id)\n check.save()\n # logging.info(short_id)\n\n\n response_data = {}\n response_data['url'] = settings.SITE_URL + \"/\" + short_id\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n return HttpResponse(json.dumps({\"error\": \"error occurs\"}), content_type=\"application/json\")\n\n\ndef check_get(request):\n url = request.GET.get('url', '')\n data = serializers.serialize(\"xml\", 
Check.objects.all()) ##it will serialize the data in to xml\n\n if not url == '' and not ' ' in url:\n try:\n obj = Check.objects.get(http_url=url)\n hashid = obj.short_id\n except Exception as e:\n obj = Check.objects.create(http_url=url)\n hashids = Hashids(min_length=4)\n hashid = hashids.encode(obj.id)\n obj.short_id = hashid\n obj.save()\n\n url_link = {'link': settings.SITE_URL + '/r/' + hashid, 'id':obj.id}\n # return HttpResponse(json.dumps(url_link), content_type=\"application/json\")\n return JsonResponse(url_link)\n # return HttpResponse(url_link) ##it will not work as json is not dumped\n # return JsonResponse([1,2,3], safe=False) ##set safe false for data type except dict\n return HttpResponse(json.dumps({'error': \"error occured\"}), content_type=\"application/json\")\n\n\ndef data_get(request):\n return render(request, 'data_get.html')\n\n","sub_path":"assignment/testurl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"389247780","text":"\n\n# # Print a separator.\n# print('-' * 18)\n\n# print(firstname)\n# print(lastname)\n\n# # Print a separator.\n# print('-' * 18)\n\nwhile True:\n# Prompt user for student's identification information...\n firstname = input(\"Enter your first name: \")\n lastname = input(\"Enter your last name: \")\n# Prompt for confirmation, and save what user entered.\n confirm = input(\"Is this information correct? (Y/N)\")\n if confirm == 'Y':\n # Break kills a loop immediately.\n print(firstname + ' ' + lastname)\n break\n else:\n # You don't need an \"else\" branch, but we're using one just to\n # demonstrate how to use continue. It just means, \"run the loop again.\"\n continue\n","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"368815300","text":"num = int(input(\"Enter an integer: \"))\nlargest = 0\nevens = 0\nodds = 0\ncum_total = 0\n\nwhile num > 0:\n if num > largest:\n largest = num\n\n if num % 2 == 0:\n evens += 1 \n else:\n odds += 1\n \n cum_total = cum_total + num\n print(\"Cumulative total:\",cum_total)\n num = int(input(\"Enter an integer: \"))\n\nif largest != 0:\n print(\"Largest number:\", largest)\n print(\"Count of even numbers:\", evens)\n print(\"Count of odd numbers:\", odds)\n","sub_path":"projects/p2/int_seq.py","file_name":"int_seq.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"160700091","text":"import math\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\nfrom tensorflow.keras.models import model_from_json\nimport os\n\nclass Predictor:\n def __init__(self,traverser):\n self.traverser = traverser\n\n def create_new_model(self,df,ticker):\n data = df.filter(['Close'])\n dataset = data.values\n training_data_len = math.ceil(len(dataset) * 0.8)\n scaler = MinMaxScaler(feature_range=(0,1))\n scaled_data = scaler.fit_transform(dataset)\n train_data = scaled_data[0:training_data_len,:]\n x_train = []\n y_train = []\n for i in range(60,len(train_data)):\n x_train.append(train_data[i-60:i,0])\n y_train.append(train_data[i,0])\n\n x_train, y_train = np.array(x_train), np.array(y_train)\n x_train = np.reshape(x_train, 
(x_train.shape[0], x_train.shape[1], 1))\n\n model = Sequential()\n model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))\n model.add(LSTM(50, return_sequences=False))\n model.add(Dense(25))\n model.add(Dense(1))\n model.compile(optimizer='adam',loss='mean_squared_error')\n model.fit(x_train,y_train,batch_size=1,epochs=1)\n model_json = model.to_json()\n save_path = os.path.join(os.getcwd(),\"models\",f\"{ticker}.json\")\n save_path_2 = os.path.join(os.getcwd(),\"models\",f\"{ticker}.h5\")\n try:\n with open(save_path, \"w\") as json_file:\n json_file.write(model_json)\n except FileNotFoundError:\n os.mkdir(os.path.join(os.getcwd(),\"models\"))\n with open(save_path, \"w\") as json_file:\n json_file.write(model_json)\n model.save_weights(save_path_2)\n\n last_60_days = data[-60:].values\n last_60_days_scaled = scaler.transform(last_60_days)\n todays_data = data[-61:-1].values\n todays_data_scaled = scaler.transform(todays_data)\n today_data = data.iloc[-1].values\n\n\n X_test = []\n X_test.append(last_60_days_scaled)\n X_test = np.array(X_test)\n X_test = np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))\n\n X_test_2 = []\n X_test_2.append(todays_data_scaled)\n X_test_2 = np.array(X_test_2)\n X_test_2 = np.reshape(X_test_2,(X_test_2.shape[0],X_test_2.shape[1],1))\n\n tomorrow_pred_price = model.predict(X_test)\n tomorrow_pred_price = scaler.inverse_transform(tomorrow_pred_price)\n todays_pred_price = model.predict(X_test_2)\n todays_pred_price = scaler.inverse_transform(todays_pred_price)\n\n tomorrow_pred_price = round(float(tomorrow_pred_price[0][0]),2)\n todays_pred_price = round(float(todays_pred_price[0][0]),2)\n today_data = round(today_data[0],2)\n\n return tomorrow_pred_price, todays_pred_price, today_data\n \n def load_existing_model(self,df,ticker):\n load_path = os.path.join(os.getcwd(),\"models\",f\"{ticker}.json\")\n load_path_2 = os.path.join(os.getcwd(),\"models\",f\"{ticker}.h5\")\n with open(load_path, \"r\") as json_file:\n model_json = json_file.read()\n model = model_from_json(model_json)\n model.load_weights(load_path_2)\n model.compile(optimizer='adam',loss='mean_squared_error')\n\n data = df.filter(['Close'])\n dataset = data.values\n scaler = MinMaxScaler(feature_range=(0,1))\n scaled_data = scaler.fit_transform(dataset)\n last_60_days = data[-60:].values\n last_60_days_scaled = scaler.transform(last_60_days)\n todays_data = data[-61:-1].values\n todays_data_scaled = scaler.transform(todays_data)\n today_data = data.iloc[-1].values\n\n\n X_test = []\n X_test.append(last_60_days_scaled)\n X_test = np.array(X_test)\n X_test = np.reshape(X_test,(X_test.shape[0],X_test.shape[1],1))\n\n X_test_2 = []\n X_test_2.append(todays_data_scaled)\n X_test_2 = np.array(X_test_2)\n X_test_2 = np.reshape(X_test_2,(X_test_2.shape[0],X_test_2.shape[1],1))\n\n tomorrow_pred_price = model.predict(X_test)\n tomorrow_pred_price = scaler.inverse_transform(tomorrow_pred_price)\n todays_pred_price = model.predict(X_test_2)\n todays_pred_price = scaler.inverse_transform(todays_pred_price)\n\n tomorrow_pred_price = round(float(tomorrow_pred_price[0][0]),2)\n todays_pred_price = round(float(todays_pred_price[0][0]),2)\n today_data = round(today_data[0],2)\n return tomorrow_pred_price, todays_pred_price, today_data\n \n def predict(self,df,ticker):\n if self.traverser.traverse(ticker):\n tomorrow_pred_price, todays_pred_price, today_data = self.load_existing_model(df,ticker)\n else:\n tomorrow_pred_price, todays_pred_price, today_data = 
self.create_new_model(df,ticker)\n return tomorrow_pred_price, todays_pred_price, today_data","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"514650870","text":"# Your task is to create a Python script that analyzes the records to calculate each of the following:\n\n# * The total number of months included in the dataset\n\n# * The total amount of revenue gained over the entire period\n\n# * The average change in revenue between months over the entire period\n\n# * The greatest increase in revenue (date and amount) over the entire period\n\n# * The greatest decrease in revenue (date and amount) over the entire period\n\n# As an example, your analysis should look similar to the one below:\n\n# ```\n# Financial Analysis\n# ----------------------------\n# Total Months: 25\n# Total Revenue: $1241412\n# Average Revenue Change: $216825\n# Greatest Increase in Revenue: Sep-16 ($815531)\n# Greatest Decrease in Revenue: Aug-12 ($-652794)\n# ```\n\nimport os\nimport csv\nimport pandas as pd\nimport glob\n\ncols = [\"Date\", \"Revenue\"]\n\nnames = pd.DataFrame()\nfor i in glob.glob('*.csv', recursive=True):\n #print(i)\n df = pd.read_csv(i, header = None, skiprows=[0],names = cols)\nprint(df)\n\ndf['DateIndex'] = df['Date']\ndf2 = df.set_index('DateIndex')\ntotal_revenue = df2['Revenue'].sum()\nmax_change = df2['Revenue'].max()\nmax_date = df2['Revenue'].argmax()\nmin_change = df2['Revenue'].min()\nmin_date = df2['Revenue'].argmin()\nunique_months = df2['Date'].unique()\ntotal_months = len(unique_months)\nAvgRevChange = total_revenue / total_months\nAvgRevChangeTest = df2['Revenue'].mean()\n\n\nprint(\"Total Months: \" + str(format(total_months, '0,')) + '\\n'\n\t+ \"Total Revenue: $\" + str(format(total_revenue,'0,')) + '\\n'\n\t+ \"Average Revenue Change: $\" + str(format(AvgRevChange,'0,')) + '\\n'\n\t+ \"Greatest Increase in Revenue: $\" + str(format(max_change,'0,')) + ' on ' + str(max_date) + '\\n'\n\t+ \"Greatest Decrease in Revenue: \" + str('${:,.2f}'.format(min_change)) + ' on ' + str(min_date)\n )","sub_path":"Python-Challenge/PyBank/.ipynb_checkpoints/main-checkpoint.py","file_name":"main-checkpoint.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"393417674","text":"# revised test file to combine population and mutations\n# a population of Candidatus Pelagibacter will be created\n# the population will have a carrying capacity of 300\n# mutation rate = 0.003 mutations/genome * 1 genome/1.3 million bases = \n# 2.30769231e-9 mutations per base per generation\n\nfrom organism import Organism\nfrom environment import Environment\nimport random\n\nfasta = open(\"./sequence.fasta\", \"r\")\nresults = open(\"./results.txt\", \"w\")\n\nMUTATION_RATE = 0.0000000023\nCARRYING_CAPACITY = 300\n\ndef parse_fasta(filename):\n sequence = \"\"\n for line in filename:\n if (not line.startswith(\">\")):\n sequence = sequence + line\n \n sequence.replace(\"\\n\", \"\")\n return sequence\n\ncandidatus = parse_fasta(fasta)\nenvironment = Environment(MUTATION_RATE, CARRYING_CAPACITY)\norganism = Organism(candidatus)\nenvironment.population_list.append(organism)\n\ndef life_cycle():\n # runs through the full life of an immortal life form\n # theoretically this should be an infinite loop\n # \n counter = 0\n for org in environment.population_list:\n if 
len(environment.population_list) >= CARRYING_CAPACITY:\n del environment.population_list[-1]\n org.transcribe()\n org.translate()\n environment.population_list.append(org.reproduce())\n rand = random.randint(0, (len(environment.population_list) - 1))\n environment.population_list[rand].DNA = environment.mutate(\n environment.population_list[rand].DNA)\n \n results.write(\"Generation \" + str(counter) + \"\\n\")\n results.write(org.__repr__() + \"\\n\")\n results.write(org.DNA + \"\\n\")\n results.write(\"---------------------\\n\")\n counter += 1\n\nlife_cycle()","sub_path":"test/test_Candidatus_Pelagibacter.py","file_name":"test_Candidatus_Pelagibacter.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"186764771","text":"from mock import patch,Mock\r\nimport unittest\r\nimport MyClass as MyClass\r\n\r\n# unittest.mock provides a class called Mock which you will use to imitate real objects in your codebase. \r\n# The library also provides a function, called patch(), which replaces the real objects in your code with Mock instances.\r\n# Side effect allows you to define a custom method and have that method called each time your mock method is called. \r\n# The value returned from this method will be used as the return value your mock method.\r\n\r\nclass TestClass(unittest.TestCase):\r\n # To mock a method in a class to return a specific value use @patch.object.\r\n @patch.object(MyClass.MyClass,'my_method')\r\n def test_my_method_shouldReturnTrue_whenMyMethodReturnsSomeValue(self,mock_my_method):\r\n # mock_my_method.return_value = True\r\n def new_func(a,b):\r\n return a+b\r\n mock_my_method.side_effect = new_func\r\n some_other_class = MyClass.SomeOtherClassThatUsesMyClass()\r\n result = some_other_class.method_under_test(2,1)\r\n self.assertEqual(result,3)\r\n # # To mock a method in a class with @patch.object but return a different value each time it is called\r\n # @patch.object(MyClass.MyClass,'my_method')\r\n # def test_my_method_ShouldReturnMultipleValues_whenMyMethodReturnsSomeValues(self,mock_my_method):\r\n # list_of_return_values = [True,False,True]\r\n # def side_effect():\r\n # return list_of_return_values.pop()\r\n # mock_my_method.side_effect = side_effect\r\n # some_other_class = MyClass.SomeOtherClassThatUsesMyClass()\r\n # self.assertTrue(some_other_class.method_under_test(1,2))\r\n # self.assertFalse(some_other_class.method_under_test(1,2))\r\n # self.assertTrue(some_other_class.method_under_test(1,2))\r\n # # To mock an entire class to test interactions with that class use @patch.\r\n # @patch('MyClass.MyClass')\r\n # def test_my_method_shouldCallMyMethod_whenSomeOtherClassMethodIsCalled(self,mock_my_class):\r\n # some_other_class = MyClass.SomeOtherClassThatUsesMyClass()\r\n # some_other_class.method_under_test(1,2)\r\n # self.assertTrue(mock_my_class.called)\r\nif(__name__ == '__main__'):\r\n unittest.main(verbosity=2)\r\n\r\n\r\n","sub_path":"patchdemo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"255963339","text":"from abc import ABCMeta\nfrom neomodel import db\n\n\nclass NodeUtils:\n __metaclass__ = ABCMeta\n\n @classmethod\n def serialize_relationships(cls, nodes, relationship):\n serialized_nodes = []\n for node in nodes:\n serialized_node = node.serialize\n serialized_node['node_relationship'] = relationship\n 
serialized_nodes.append(serialized_node)\n\n        return serialized_nodes\n\n    def serialized_realtionships_of_type(self, node_type):\n        results = self.cypher('''\n            START p=node({self})\n            MATCH n=(p)<-[r]->(x:%s)\n            RETURN r, x.node_id as Node_id\n            '''%(node_type)\n        )\n        nodes = []\n\n        for row in results[0]:\n            node = db.cypher_query(\n                '''\n                MATCH (n:%s) WHERE id(n) = %s RETURN n\n                ''' % (node_type, row[1])\n            )\n            serialized_node = node.serialize\n            serialized_node['node_relationship'] = row[0].type\n            nodes.append(serialized_node)\n\n        return nodes\n","sub_path":"paradise_papers_search/fetch_api/models/nodeutils.py","file_name":"nodeutils.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487716516","text":"from django.test import TestCase\nfrom django.test import Client\nfrom django.core.urlresolvers import reverse, resolve\nfrom .models import MyRequest\nfrom datetime import datetime\n\n\nclass Request_test(TestCase):\n\n    def test_request_view(self):\n        \"\"\" Test the request view \"\"\"\n\n        found = resolve('/request_history')\n        self.assertEqual(found.func.__name__, 'RequestHistory')\n\n        self.client = Client()\n        url = reverse(\"request_history\")\n        response = self.client.get(url)\n        self.assertEqual(response.status_code, 200)\n\n    def test_the_model_Info(self):\n        \"\"\"Test the model Request_model\"\"\"\n\n        instance = MyRequest()\n        instance.request_method = 'POST'\n        instance.request_link = 'http://127.0.0.1:8000/'\n        instance.request_time = datetime.now()\n        instance.save()\n\n        saved_items = MyRequest.objects.all()\n        first_query = saved_items[0]\n        self.assertEqual(first_query.request_method, \"POST\")\n        self.assertEqual(first_query.request_link, \"http://127.0.0.1:8000/\")\n\n    def test_request_middleware_stors_requests(self):\n        \"\"\" Test that the middleware grabs requests and saves them in the db\"\"\"\n\n        for c in range(4):\n            url = reverse(\"home\")\n            response = self.client.post(url)\n            response = self.client.get(url)\n            self.assertEqual(response.status_code, 200)\n        self.assertEqual(MyRequest.objects.all().count(), 5)\n\n    def test_request_to_store_only_10_request_to_database(self):\n        \"\"\" Test that the middleware stores only 10 requests in the db\"\"\"\n        for c in range(15):\n            url = reverse(\"home\")\n            response = self.client.get(url)\n            self.assertEqual(response.status_code, 200)\n        self.assertEqual(MyRequest.objects.all().count(), 10)\n","sub_path":"apps/request_history/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"640903944","text":"import csv\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\n\ndef read_data(csv_file, db):\n    \"\"\"\n    Load data into the DB from a CSV file\n    \"\"\"\n    with open(csv_file, encoding='utf8') as csvfile:\n        # read the data file and write it into the collection\n        reader = csv.DictReader(csvfile)\n        ticket_list = []\n        for row in reader:\n            row = dict(row)\n            row['Цена'] = int(row['Цена'])\n            row['Дата'] += '.2019'\n            row['Дата'] = datetime.strptime(row['Дата'], '%d.%m.%Y')\n            ticket_list.append(row)\n            # print(row)\n        db.insert_many(ticket_list)\n        # print(db)\n\ndef find_cheapest(db):\n    \"\"\"\n    Sort the tickets from the database by ascending price\n    Documentation: https://docs.mongodb.com/manual/reference/method/cursor.sort/\n    \"\"\"\n    for item in list(db.find().sort('Цена')):\n        print(f'{item[\"Цена\"]} рублей за билет на концерт {item[\"Исполнитель\"]}')\n\ndef find_by_name(name, db):\n    \"\"\"\n    Find tickets by artist name (including by substring)\n    and return them sorted by ascending price\n    \"\"\"\n    for item in db.find({'Исполнитель': {'$regex': name}}, {'_id': 0}).sort('Цена'):\n        print(f'{item[\"Исполнитель\"]} - билет стоит {item[\"Цена\"]}')\n\ndef find_by_date(start_date, end_date, db):\n    \"\"\"\n    Find tickets within a given date range\n    \"\"\"\n    start = datetime.strptime(start_date, '%d.%m.%Y')\n    end = datetime.strptime(end_date, '%d.%m.%Y')\n    for item in db.find({'Дата': {'$gte': start, '$lte': end}}, {'_id': 0}):\n        print(f'{item[\"Дата\"]} - выступает группа {item[\"Исполнитель\"]}')\n\n\nif __name__ == '__main__':\n    client = MongoClient()\n    concert_db = client['me']\n    concert_collection = concert_db['concerts']\n    read_data('artists.csv', concert_collection)\n    print('\\nСортировка по возрастанию цены:')\n    find_cheapest(concert_collection)\n    print('\\nПоиск по имени:')\n    find_by_name('Th', concert_collection)\n    print('\\nПоиск по дате:')\n    find_by_date('01.07.2019', '30.07.2019', concert_collection)\n    concert_collection.drop()\n","sub_path":"mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"44733237","text":"def checkio(d):\n    binIPs = [''.join('{0:08b}'.format(int(s)) for s in a.split('.')) for a in d]\n\n    subnet = 32\n    while len(set(i[:subnet] for i in binIPs)) > 1:\n        subnet -= 1\n\n    route = binIPs[0][:subnet].ljust(32, '0')\n    route = '.'.join(str(int(route[i:][:8], 2)) for i in range(0, 32, 8))\n\n    return '{0}/{1}'.format(route, subnet)\n","sub_path":"CiO/ip-network-route-summarization.py","file_name":"ip-network-route-summarization.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198953680","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 05 09:39:59 2018\n\n@author: Memphis\n\"\"\"\n#\nwith open('rosalind_ba1f.txt') as input_data:\n    text = input_data.readline().strip()\nres = [0]* (len(text)+1)\nfor i in range(len(text)):\n    if text[i] == 'C':\n        res[i+1] = res[i] - 1\n    elif text[i] == \"G\":\n        res[i+1] = res[i] + 1\n    else:\n        res[i+1] = res[i]\nvalue = min(res)\nfor i in range(1,len(res)):\n    if res[i] == value:\n        print(i),\n    ","sub_path":"bioinformatics textbook track/BA1F.py","file_name":"BA1F.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115004748","text":"#!/bin/python\nimport os\nimport roomai.common\nimport copy\n\n\n#\n#0, 1, 2, 3, ..., 7, 8, 9, 10, 11, 12, 13, 14\n#^ ^ ^ ^ ^\n#| | | | |\n#3, 10, J, Q, K, A, 2, r, R\n#\n\nclass DouDiZhuActionElement:\n    \"\"\"\n    \"\"\"\n    str_to_rank = {'3':0, '4':1, '5':2, '6':3, '7':4, '8':5, '9':6, 'T':7, 'J':8, 'Q':9, 'K':10, 'A':11, '2':12, 'r':13, 'R':14, 'x':15, 'b':16}\n    # x means check, b means bid\n    rank_to_str = {0: '3', 1: '4', 2: '5', 3: '6', 4: '7', 5: '8', 6: '9', 7: 'T', 8: 'J', 9: 'Q', 10: 'K', 11: 'A', 12: '2', 13: 'r', 14: 'R', 15: 'x', 16: 'b'}\n    three = 0;\n    four = 1;\n    five = 2;\n    six = 3;\n    seven = 4;\n    eight = 5;\n    night = 6;\n    ten = 7;\n    J = 8;\n    Q = 9;\n    K = 10;\n    A = 11;\n    two = 12;\n    r = 13;\n    R = 14;\n    cheat = 15;\n    bid = 16;\n\n    total_normal_cards = 15\n\n\n\nclass DouDiZhuPokerAction(roomai.common.AbstractAction):\n    \"\"\"\n    \"\"\"\n    def __init__(self):\n        \"\"\"\n\n        \"\"\"\n        pass\n\n    def __init__(self, masterCards, slaveCards):\n        
\"\"\"\n\n Args:\n masterCards:\n slaveCards:\n \"\"\"\n self.__masterCards = [c for c in masterCards]\n self.__slaveCards = [c for c in slaveCards]\n\n self.__masterPoints2Count = None\n self.__slavePoints2Count = None\n self.__isMasterStraight = None\n self.__maxMasterPoint = None\n self.__minMasterPoint = None\n self.__pattern = None\n\n self.action2pattern()\n self.__key = DouDiZhuPokerAction.master_slave_cards_to_key(masterCards, slaveCards)\n\n\n\n @property\n def key(self): return self.__key\n @property\n def masterCards(self): return self.__masterCards\n @property\n def slaveCards(self): return self.__slaveCards\n @property\n def masterPoints2Count(self): return self.__masterPoints2Count\n @property\n def slavePoints2Count(self): return self.__slavePoints2Count\n @property\n def isMasterStraight(self): return self.__isMasterStraight\n @property\n def maxMasterPoint(self): return self.__maxMasterPoint\n @property\n def minMasterPoint(self): return self.__minMasterPoint\n @property\n def pattern(self): return self.__pattern\n\n @classmethod\n def lookup(cls, key):\n \"\"\"\n\n Args:\n key:\n\n Returns:\n\n \"\"\"\n return AllActions[\"\".join(sorted(key))]\n\n @classmethod\n def master_slave_cards_to_key(cls, masterCards, slaveCards):\n \"\"\"\n\n Args:\n masterCards:\n slaveCards:\n\n Returns:\n\n \"\"\"\n key_int = (masterCards + slaveCards)\n key_str = []\n for key in key_int:\n key_str.append(DouDiZhuActionElement.rank_to_str[key])\n key_str.sort()\n return \"\".join(key_str)\n\n def action2pattern(self):\n \"\"\"\n\n \"\"\"\n\n self.__masterPoints2Count = dict()\n for c in self.__masterCards:\n if c in self.__masterPoints2Count:\n self.__masterPoints2Count[c] += 1\n else:\n self.__masterPoints2Count[c] = 1\n\n self.__slavePoints2Count = dict()\n for c in self.__slaveCards:\n if c in self.__slavePoints2Count:\n self.__slavePoints2Count[c] += 1\n else:\n self.__slavePoints2Count[c] = 1\n\n self.__isMasterStraight = 0\n num = 0\n for v in self.__masterPoints2Count:\n if (v + 1) in self.__masterPoints2Count and (v + 1) < DouDiZhuActionElement.two:\n num += 1\n if num == len(self.__masterPoints2Count) - 1 and len(self.__masterPoints2Count) != 1:\n self.__isMasterStraight = 1\n\n self.__maxMasterPoint = -1\n self.__minMasterPoint = 100\n for c in self.__masterPoints2Count:\n if self.__maxMasterPoint < c:\n self.__maxMasterPoint = c\n if self.__minMasterPoint > c:\n self.__minMasterPoint = c\n\n ########################\n ## action 2 pattern ####\n ########################\n\n\n # is cheat?\n if len(self.__masterCards) == 1 \\\n and len(self.__slaveCards) == 0 \\\n and self.__masterCards[0] == DouDiZhuActionElement.cheat:\n self.__pattern = AllPatterns[\"i_cheat\"]\n\n # is roblord\n elif len(self.__masterCards) == 1 \\\n and len(self.__slaveCards) == 0 \\\n and self.__masterCards[0] == DouDiZhuActionElement.bid:\n self.__pattern = AllPatterns[\"i_bid\"]\n\n # is twoKings\n elif len(self.__masterCards) == 2 \\\n and len(self.__masterPoints2Count) == 2 \\\n and len(self.__slaveCards) == 0 \\\n and self.__masterCards[0] in [DouDiZhuActionElement.r, DouDiZhuActionElement.R] \\\n and self.__masterCards[1] in [DouDiZhuActionElement.r, DouDiZhuActionElement.R]:\n self.__pattern = AllPatterns[\"x_rocket\"]\n\n else:\n\n ## process masterCards\n masterPoints = self.__masterPoints2Count\n if len(masterPoints) > 0:\n count = masterPoints[self.__masterCards[0]]\n for c in masterPoints:\n if masterPoints[c] != count:\n self.__pattern = AllPatterns[\"i_invalid\"]\n\n if self.__pattern == None:\n 
pattern = \"p_%d_%d_%d_%d_%d\" % (len(self.__masterCards), len(masterPoints), \\\n self.__isMasterStraight, \\\n len(self.__slaveCards), 0)\n\n if pattern in AllPatterns:\n self.__pattern = AllPatterns[pattern]\n else:\n self.__pattern = AllPatterns[\"i_invalid\"]\n\n def __deepcopy__(self, memodict={}, newinstance = None):\n \"\"\"\n\n Args:\n memodict:\n newinstance:\n\n Returns:\n\n \"\"\"\n return self.lookup(self.key)\n\n\n\n############## read data ################\nAllPatterns = dict()\nAllActions = dict()\nimport zipfile\ndef get_file(path):\n \"\"\"\n\n Args:\n path:\n\n Returns:\n\n \"\"\"\n if \".zip\" in path:\n lines = path.split(\".zip\")\n zip1 = zipfile.ZipFile(lines[0] + \".zip\")\n len1 = len(lines[1])\n path = lines[1][1:len1]\n return zip1.open(path)\n else:\n return open(path)\npath = os.path.split(os.path.realpath(__file__))[0]\npattern_file = get_file(path + \"/patterns.py\")\nfor line in pattern_file:\n line = line.replace(\" \", \"\").strip()\n line = line.split(\"#\")[0]\n if len(line) == 0 or len(line[0].strip()) == 0:\n continue\n lines = line.split(\",\")\n for i in range(1, len(lines)):\n lines[i] = int(lines[i])\n AllPatterns[lines[0]] = lines\npattern_file.close()\n\n\n\naction_file = get_file(path + \"/actions.py\")\nfor line in action_file:\n line = line.replace(\" \", \"\").strip()\n lines = line.split(\"\\t\")\n\n if lines[3] not in AllPatterns:\n continue\n\n m = [int(str1) for str1 in lines[1].split(\",\")]\n s = []\n if len(lines[2]) > 0:\n s = [int(str1) for str1 in lines[2].split(\",\")]\n action = DouDiZhuPokerAction(m, s)\n if action.key != lines[0] or action.pattern[0] != lines[3]:\n print (lines)\n raise ValueError(\"%s is wrong. The generated action has key(%s) and pattern(%s)\"%(line, action.key,action.pattern[0]))\n\n\n AllActions[action.key] = action\naction_file.close()\n\n\n\n","sub_path":"roomai/doudizhu/DouDiZhuPokerAction.py","file_name":"DouDiZhuPokerAction.py","file_ext":"py","file_size_in_byte":7728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"177902737","text":"import streamlit as st\nimport pandas as pd\nimport yfinance as yf\nfrom datetime import date\nfrom app_db import sectors\nfrom app_db import macros\n\ncol1, col2 = st.beta_columns(2)\n\n\n# @st.cache\ndef load_data(stock_list, start_date):\n data_list = [yf.download(stock, start_date)['Close'] for stock in stock_list]\n data = pd.concat(data_list, axis=1)\n data.columns = [code for code in stock_list]\n # long_name = [yf.Ticker(ticker).info['longName'] for ticker in data.columns]\n\n return data\n\n\n# sidebar construct\nst.sidebar.title('Stocks to Watch')\nstart_date = st.sidebar.date_input(\"주가그래프 시작일을 선택하세요. 
디폴트 2020/1/1\", value=date(2020, 1, 1))\nsector_choice = st.sidebar.selectbox('섹터를 선택하세요', [k for k in sectors.keys()])\nstock_list = st.sidebar.multiselect('Select Stocks', sectors[sector_choice], sectors[sector_choice])\nst.sidebar.write('----------------------')\nst.sidebar.title('Macro Indicator')\nst.sidebar.write('WORKS TO DO')\nmacro_choice = st.sidebar.selectbox('인디케이터 선택하세요', [k for k in macros.keys()])\nmacro_list = st.sidebar.multiselect('Select Indicators', macros[macro_choice], macros[macro_choice])\nst.sidebar.text('copyright by Taeyoon Lee')\n\n\ndf = load_data(sectors[sector_choice], start_date)\n\nn = 1\nfor stock in stock_list:\n day0_price = df[stock][-1]\n day1_price = df[stock][-2]\n note_up = f'price: {round(day0_price,2)} up by {(day0_price/day1_price -1)*100:.2f} %'\n note_down = f'price: {round(day0_price, 2)} down by {(day0_price / day1_price - 1) * 100:.2f} %'\n if n == 1:\n col1.title(stock)\n # col1.write(company_name[count])\n if day0_price >= day1_price:\n col1.write(note_up)\n else:\n col1.write(note_down)\n col1.line_chart(df[stock])\n n = 2\n elif n == 2:\n col2.title(stock)\n # col2.write(company_name[count])\n if day0_price >= day1_price:\n col2.write(note_up)\n else:\n col2.write(note_down)\n col2.line_chart(df[stock])\n n = 1\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"120035055","text":"import time\nimport seedsbot.main as seedsbot\nimport flask\nimport threading\nimport os\n\nRETRY_INTERVAL = 5\nENABLE_LIVECHECKER_ENV_VAR = \"ENABLE_LIVECHECKER\"\n\nlc_env = os.getenv(ENABLE_LIVECHECKER_ENV_VAR)\nENABLE_LIVECHECKER = lc_env is not None and lc_env.lower() in [\"1\", \"true\", \"yes\"]\nprint(\"Live checker enabled:\", ENABLE_LIVECHECKER)\n\nflask_app = flask.Flask(__name__)\n\n@flask_app.route('/')\ndef livecheck():\n return \"Hi!\"\n\ndef run_livechecker():\n flask_app.run(host=\"0.0.0.0\", port=8080)\n\nif __name__ == \"__main__\":\n \n if ENABLE_LIVECHECKER:\n lc_thread = threading.Thread(daemon=True, target=run_livechecker)\n lc_thread.start()\n \n while True:\n try:\n seedsbot.run()\n except Exception as err:\n print(err)\n print(\"Retrying in\", RETRY_INTERVAL)\n time.sleep(RETRY_INTERVAL)\n","sub_path":"runbot.py","file_name":"runbot.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424325833","text":"class matche_record():\r\n\tdef __init__(self,\r\n\t\t\t\tsummoner_name,\r\n\t\t\t\tgameId,\r\n\t\t\t\tchampion,\r\n\t\t\t\ttimestamp,\r\n\t\t\t\tlane,\r\n\t\t\t\tqueue,\r\n\t\t\t\tseason,\r\n\t\t\t\tgameMode,\r\n\t\t\t\taccountId,\r\n\t\t\t\tparticipantId,\r\n\t\t\t\twin,\r\n\t\t\t\tphysicalDamageDealt,\r\n\t\t\t\tmagicDamageDealt,\r\n\t\t\t\ttotalDamageDealt,\r\n\t\t\t\tkills,\r\n\t\t\t\tassists,\r\n\t\t\t\tdeaths,\r\n\t\t\t\ttotalDamageTaken,\r\n\t\t\t\ttotalMinionsKilled,\r\n\t\t\t\ttotalPlayerScore,\r\n\t\t\t\tgoldEarned,\r\n\t\t\t\tgoldSpent,\r\n\t\t\t\tposted):\r\n\t\tself.summoner_name = summoner_name\r\n\t\tself.gameId = gameId\r\n\t\tself.champion = champion\r\n\t\tself.timestamp = timestamp\r\n\t\tself.lane = lane\r\n\t\tself.queue = queue\r\n\t\tself.season = season\r\n\t\tself.gameMode = gameMode\r\n\t\tself.accountId = accountId\r\n\t\tself.participantId = participantId\r\n\t\tself.win = win\r\n\t\tself.physicalDamageDealt = physicalDamageDealt\r\n\t\tself.magicDamageDealt = 
magicDamageDealt\r\n\t\tself.totalDamageDealt = totalDamageDealt\r\n\t\tself.kills = kills\r\n\t\tself.assists = assists\r\n\t\tself.deaths = deaths\r\n\t\tself.totalDamageTaken = totalDamageTaken\r\n\t\tself.totalMinionsKilled = totalMinionsKilled\r\n\t\tself.totalPlayerScore = totalPlayerScore\r\n\t\tself.goldEarned = goldEarned\r\n\t\tself.goldSpent = goldSpent\r\n\t\tself.posted = posted\r\n\r\n\r\nclass summoner_object():\r\n\tdef __init__(self, summoner_id, summonerLevel, rank, leagueName, tier, hotStreak, wins, losses, summoner_name, leaguePoints, date_new_rank, date_new_tier, accountId):\r\n\t\tself.summoner_id = summoner_id\r\n\t\tself.summonerLevel = summonerLevel\r\n\t\tself.rank = rank\r\n\t\tself.leagueName = leagueName\r\n\t\tself.tier = tier\r\n\t\tself.hotStreak = hotStreak\r\n\t\tself.wins = wins\r\n\t\tself.losses = losses\r\n\t\tself.summoner_name = summoner_name\r\n\t\tself.leaguePoints = leaguePoints\r\n\t\tself.date_new_rank = date_new_rank\r\n\t\tself.date_new_tier = date_new_tier\r\n\t\tself.accountId = accountId\r\n","sub_path":"custom_class.py","file_name":"custom_class.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166876367","text":"from flask import Flask, jsonify, request, send_from_directory\nfrom wallet import Wallet\nfrom flask_cors import CORS\nfrom blockchain import Blockchain\n\napp = Flask(__name__)\n# wallet.create_keys()\nCORS(app)\n# Get Routes\n\n\n@app.route('/', methods=['GET'])\ndef get_node_ui():\n    return send_from_directory('ui', 'node.html')\n\n\n@app.route('/network', methods=['GET'])\ndef get_network_ui():\n    return send_from_directory('ui', 'network.html')\n\n\n@app.route('/nodes', methods=['GET'])\ndef get_nodes():\n    all_nodes = blockchain.get_all_nodes()\n    response = {\n        'all_nodes': all_nodes\n    }\n    return jsonify(response), 200\n\n\n@app.route('/walette', methods=['GET'])\ndef load_keys():\n    if wallet.load_keys():\n        global blockchain\n        blockchain = Blockchain(wallet.public_key, port)\n        response = {\n            'public_key': wallet.public_key,\n            'private_key': wallet.private_key,\n            'funds': blockchain.get_balence()\n        }\n        return jsonify(response), 201\n    else:\n        response = {'message': 'loading the keys failed!'}\n        return jsonify(response), 500\n\n\n@app.route('/balance', methods=['GET'])\ndef get_balence():\n    balance = blockchain.get_balence()\n    if balance is not None:\n        response = {\n            'message': 'fetching balance succeeded',\n            'funds': balance\n        }\n        return jsonify(response), 200\n    else:\n        response = {\n            'message': 'Loading balance failed!',\n            'wallet_set_up': wallet.public_key is not None\n        }\n        return jsonify(response), 500\n\n\n@app.route('/chain', methods=['GET'])\ndef get_chain():\n    chain_snapshot = blockchain.chain\n    dict_chain = [block.__dict__.copy() for block in chain_snapshot]\n    for dict_block in dict_chain:\n        dict_block['transactions'] = [tx.__dict__.copy()\n                                      for tx in dict_block['transactions']]\n    return jsonify(dict_chain), 200\n\n\n@app.route('/transactions', methods=['GET'])\ndef get_transaction():\n    transactions = blockchain.get_open_transactions()\n    dict_transactions = [tx.__dict__ for tx in transactions]\n    # response = {\n    #     'message': 'fetched transactions successfully!',\n    #     'transactions': dict_transactions\n    # }\n    return jsonify(dict_transactions), 200\n\n\n# Post Routes\n\n\n@app.route('/mine', methods=['POST'])\ndef mine():\n    print(blockchain.resolve_conflits)\n    if blockchain.resolve_conflits:\n        response = {\n            'message': 'resolve conflict first, block not added'\n        }\n        return jsonify(response), 409\n    block = blockchain.mine_block()\n    if block is not None:\n        dict_block = block.__dict__.copy()\n        dict_block['transactions'] = [\n            tx.__dict__ for tx in dict_block['transactions']]\n        # Success\n        response = {\n            'message': 'block added successfully',\n            'block': dict_block,\n            'funds': blockchain.get_balence()\n        }\n        return jsonify(response), 201\n    else:\n        # Failure\n        response = {\n            'message': 'adding block failed',\n            'wallet_set_up': wallet.public_key is not None,\n            'funds': blockchain.get_balence()\n        }\n        return jsonify(response), 500\n\n\n@app.route('/node', methods=['POST'])\ndef add_node():\n    values = request.get_json()\n    if not values:\n        response = {\n            'message': 'No Data'\n        }\n        return jsonify(response), 500\n    if 'node' not in values:\n        response = {\n            'message': 'No node data found'\n        }\n        return jsonify(response), 500\n    node = values['node']\n    blockchain.add_peer_node(node)\n    response = {\n        'message': 'node added successfully',\n        'all_nodes': blockchain.get_all_nodes()\n    }\n    return jsonify(response), 201\n\n\n@app.route('/transaction', methods=['POST'])\ndef add_transaction():\n    print('add_transaction starts')\n    if wallet.public_key is None:\n        response = {\n            'message': \"no wallet set up\"\n        }\n        return jsonify(response), 400\n    values = request.get_json()\n    if not values:\n        response = {\n            'message': 'No data found'\n        }\n        return jsonify(response), 500\n    required_fields = ['recipient', 'amount']\n    if not all(field in values for field in required_fields):\n        response = {\n            'message': 'required data is missing'\n        }\n        return jsonify(response), 400\n    recipient = values['recipient']\n    amount = values['amount']\n    signature = wallet.sign_transaction(wallet.public_key, recipient, amount)\n    success = blockchain.add_transaction(\n        recipient, wallet.public_key, signature, amount)\n    if success:\n        response = {\n            'message': \"successfully added transaction\",\n            'transaction': {\n                'sender': wallet.public_key,\n                'recipient': recipient,\n                'amount': amount,\n                'signature': signature\n            },\n            'funds': blockchain.get_balence()\n        }\n        return jsonify(response), 201\n    else:\n        response = {\n            'message': 'creating transaction failed',\n        }\n        return jsonify(response), 500\n\n\n@app.route('/walette', methods=['POST'])\ndef create_keys():\n    wallet.create_keys()\n    if wallet.save_to_file():\n        global blockchain\n        blockchain = Blockchain(wallet.public_key, port)\n        response = {\n            'public_key': wallet.public_key,\n            'private_key': wallet.private_key,\n            'funds': blockchain.get_balence()\n        }\n        return jsonify(response), 201\n    else:\n        response = {'message': 'saving the keys failed!'}\n        return jsonify(response), 500\n\n\n@app.route('/broadcast-transaction', methods=['POST'])\ndef broadcast_transaction():\n    values = request.get_json()\n    if not values:\n        response = {\n            'message': 'No data found'\n        }\n        return jsonify(response), 500\n    required = ['sender', 'recipient', 'amount', 'signature']\n    if not all([key in values for key in required]):\n        response = {\n            'message': 'some data is missing'\n        }\n        return jsonify(response), 400\n    success = blockchain.add_transaction(\n        values['recipient'], values['sender'], values['signature'], values['amount'], is_reciving=True)\n    if success:\n        response = {\n            'message': \"successfully added transaction\",\n            'transaction': {\n                'sender': values['sender'],\n                'recipient': values['recipient'],\n                'amount': values['amount'],\n                'signature': values['signature']\n            }\n        }\n        return jsonify(response), 200\n    else:\n        response = {\n            'message': 'creating transaction failed',\n        }\n        
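# --- Editor's aside (illustrative sketch, not part of the original node.py):
# the POST routes above all repeat the same "parse JSON, check required
# fields" steps. A small helper -- the name require_fields is hypothetical --
# keeps the validation and the error payload in one place:
def require_fields(values, fields):
    """Return None when all fields are present, else an (error-dict, status) pair."""
    if not values:
        return {'message': 'No data found'}, 400
    missing = [field for field in fields if field not in values]
    if missing:
        return {'message': 'Missing fields: ' + ', '.join(missing)}, 400
    return None
# A route body would then start with:
#   err = require_fields(request.get_json(), ['sender', 'recipient', 'amount', 'signature'])
#   if err:
#       return jsonify(err[0]), err[1]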
return jsonify(response), 500\n\n\n@app.route('/broadcast-block', methods=['POST'])\ndef broadcast_block():\n values = request.get_json()\n if not values:\n response = {\n 'message': 'No data found'\n }\n return jsonify(response), 400\n if 'block' not in values:\n response = {\n 'message': 'block data is missing'\n }\n return jsonify(response), 400\n\n block = values['block']\n if block['index'] == blockchain.chain[-1].index + 1:\n if blockchain.add_block(block): \n print('block_added')\n response = {\n 'message':'block added!', \n }\n return jsonify(response) , 201\n else: \n print('block not added')\n response = {\n 'message':'block seems invalid !'\n }\n return jsonify(response) , 409\n elif block['index'] > blockchain.chain[-1].index:\n print(str(block['index']) , str(blockchain.chain[-1].index))\n response = {\n 'message': 'blockchain seems to different to the local blockchain'\n }\n blockchain.resolve_conflits = True\n return jsonify(response), 200\n else:\n response = {\n 'message': 'blockchain seems to be shorter, block not edit'\n }\n return jsonify(response), 409\n # status code 409 means that the data sent is invalid\n\n\n@app.route('/resolve-conflicts', methods=['POST'])\ndef resolve_conflicts():\n replaced = blockchain.resolve()\n if replaced:\n response= {\n 'message':'Chain replaced'\n }\n else:\n response ={\n 'message':'Local chain capted'\n }\n return jsonify(response) , 200\n\n# DELETE Routes\n\n@app.route('/node/', methods=['DELETE'])\ndef remove_node(node_url):\n if node_url == '' or node_url == None:\n response = {\n 'message': \"No Node found\"\n }\n return jsonify(response), 500\n blockchain.remove_peer_node(node_url)\n response = {\n 'message': 'Node removed',\n 'all_nodes': blockchain.get_all_nodes()\n }\n return jsonify(response), 200\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('-p', '--port', type=int, default=5000)\n args = parser.parse_args()\n port = args.port\n wallet = Wallet(port)\n blockchain = Blockchain(wallet.public_key, port)\n app.run(host='0.0.0.0', port=port)\n","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":9291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"80039394","text":"# -*- coding: utf-8 -*-\n\"\"\"\nAuthor : Shameer Sathar\n\"\"\"\nfrom __future__ import print_function\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform, conditional\n\nimport numpy as np\nimport scipy.io as sio\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nfrom keras.models import load_model\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n\nfrom sklearn.model_selection import train_test_split\nfrom hyperas.utils import eval_hyperopt_space\n\n\ndef data():\n X_data = np.load('/media/hpc/codes/GitLab/sigpy_github/X_train_all.npy')\n Y_data = np.load('/media/hpc/codes/GitLab/sigpy_github/Y_train_all.npy')\n Y_data_1D = np.argmax(Y_data, axis=1)\n dataset = np.hstack((X_data, np.expand_dims(Y_data_1D, axis=1)))\n dataset_0 = dataset[np.where(Y_data_1D==0)]\n dataset_1 = dataset[np.where(Y_data_1D==1)]\n select_0 = np.random.choice(dataset_0.shape[0],dataset_1.shape[0], replace=False)\n dataset_0_selected = dataset_0[select_0, :]\n dataset_test_train = 
np.vstack((dataset_0_selected, dataset_1))\n    np.random.shuffle(dataset_test_train)\n    np.random.shuffle(dataset_test_train)\n    X_train, X_test, y_train, y_test = train_test_split(dataset_test_train[:, :-1],\n                                                        dataset_test_train[:, -1], test_size=0.3, random_state=42)\n    y_train = np_utils.to_categorical(y_train, 2)\n    y_test = np_utils.to_categorical(y_test, 2)\n    X_train = X_train.reshape((-1, 1, 6, 6))\n    X_test = X_test.reshape((-1, 1, 6, 6))\n    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)\n    return X_train, y_train, X_test, y_test\n\ndef model(X_train, y_train, X_test, y_test):\n    model = Sequential()\n    # 1st Convolution Layer\n    model.add(Convolution2D({{choice([16, 32, 64])}}, 3, 3, activation='relu',\n                            kernel_initializer='glorot_uniform',\n                            input_shape=(1, 6, 6)))\n    model.add(MaxPooling2D(pool_size=(2,2)))\n\n    # 2nd Convolution Layer\n    model.add(Convolution2D({{choice([16, 32, 64])}}, 2, 2, activation='relu'))\n    model.add(MaxPooling2D(pool_size=(1,1)))\n    model.add(Dropout({{uniform(0, 1)}}))\n\n    # Fully connected layer\n    model.add(Flatten())\n    model.add(Dense(256, activation='relu'))\n    model.add(Dropout({{uniform(0, 1)}}))\n\n    # Output\n    model.add(Dense(2, activation='softmax'))\n\n    # Optimizer\n    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n    # Compile the model\n    model.compile(loss='categorical_crossentropy',\n                  optimizer=sgd,\n                  metrics=['accuracy'])\n\n    # Train the network\n    model.fit(X_train, y_train, nb_epoch=15, verbose=1)\n    scores, acc = model.evaluate(X_test, y_test)\n    print('Test accuracy:', acc)\n    return {'loss': -acc, 'status': STATUS_OK, 'model': model}\n\n# Run the hyperparameter search when executed as a script.\nif __name__ == '__main__':\n    # #print(model(X_train, y_train, X_test, y_test))\n    best_run, best_model = optim.minimize(model=model,\n                                          data=data,\n                                          algo=tpe.suggest,\n                                          max_evals=5,\n                                          trials=Trials())\n    X_train, y_train, X_test, y_test = data()\n    print(\"Evaluation of best performing model:\")\n    print(best_model.evaluate(X_test, y_test))\n    best_model.save('/media/hpc/codes/GitLab/sigpy_master/sigpy/ml_models/nn_2D')\n    print(\"Best performing model chosen hyper-parameters:\")\n    print(best_run)\n","sub_path":"hyperparameter_opt/main_optimisation_2d_keras.py","file_name":"main_optimisation_2d_keras.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"125111949","text":"import numpy as np\nimport cv2\n\n'''\n    def get_HDR_radiance_map(stack, t, g, w):\n    Merge the exposure stack in `stack` using the log-domain method of\n    [Debevec and Malik, 1997] to obtain a high dynamic range image.\n    Parameters:\n        stack: list of np.ndarray\n            the exposure stack\n        t: list of float\n            log values of the exposure times\n        g: np.array\n            camera response function\n        w: callable\n            weighting function\n    Returns:\n        np.ndarray\n            HDR image of the scene captured in the exposure stack\n'''\ndef DM97_radiance_map(stack, t, g, w):\n    resolution = stack[0].shape\n    hdr = np.empty(resolution, dtype=np.float64)\n    img_num = len(stack)\n    for i in range(resolution[0]):\n\n        for j in range(resolution[1]):\n            p_ij = np.array([g[stack[k][i,j]] for k in range(img_num)])\n            w_ij = np.array([w(stack[k][i,j]) for k in range(img_num)])\n            sum_w = np.sum(w_ij)\n\n            if sum_w > 0:\n                hdr[i, j] = np.sum(w_ij * (p_ij - t) / sum_w)\n            else:\n                hdr[i, j] = p_ij[img_num // 2] - t[img_num // 2]\n        # print('merging iter', i, ' nan:', np.sum(np.isnan(hdr[i,:])))\n    return cv2.normalize(np.exp(hdr), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, 
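# Editor's aside: the loop above implements the [Debevec and Malik, 1997]
# log-radiance estimate; with weight w, response g and log exposure t_j,
#   ln E_i = sum_j w(Z_ij) * (g(Z_ij) - t_j) / sum_j w(Z_ij)
# and np.exp then recovers linear radiance, normalized here to [0, 1].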
dtype=cv2.CV_32F)\n","sub_path":"util/merging.py","file_name":"merging.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159324952","text":"# coding: utf-8\n\nimport numpy as np\nfrom detectors.base import calc_score_over_time\n\n\nclass W_UPDATE(object):\n def __init__(self, classifier, **kwargs):\n self.clf = classifier\n self.drifts = []\n\n def run(self, data, train_size, update_policy=None, change_points=None, show_progress=None):\n self.data_len = len(data)\n if change_points is None:\n change_points = [self.data_len // 2]\n\n head = 0\n X = data[head:head + train_size, :-1]\n y = data[head:head + train_size, -1]\n self.clf.fit(X, y)\n y_pred = self.clf.predict(X).tolist()\n head = train_size\n\n for cp in change_points:\n if (cp + train_size) > self.data_len:\n break\n if (cp + train_size) <= head:\n continue\n self.drifts.append(cp)\n\n X = data[head:cp + train_size, :-1]\n y_pred = y_pred + self.clf.predict(X).tolist()\n\n X = data[cp:cp + train_size, :-1]\n y = data[cp:cp + train_size, -1]\n self.clf.fit(X, y)\n head = cp + train_size\n\n if head < self.data_len:\n X = data[head:, :-1]\n y_pred = y_pred + self.clf.predict(X).tolist()\n\n y_pred = np.array(y_pred)\n score = calc_score_over_time(data[:, -1], y_pred, skip=train_size)\n return self.drifts, score\n\n\nclass WO_UPDATE(object):\n def __init__(self, classifier, **kwargs):\n self.clf = classifier\n\n def run(self, data, train_size, update_policy=None, show_progress=None):\n drifts = []\n X = data[:train_size, :-1]\n y = data[:train_size, -1]\n self.clf.fit(X, y)\n X = data[:, :-1]\n y = data[:, -1]\n y_pred = self.clf.predict(X)\n score = calc_score_over_time(data[:, -1], y_pred, skip=train_size)\n return drifts, score\n\n\nclass MA1(object):\n def __init__(self, classifier, **kwargs):\n super().__init__()\n\n def run(self, data, train_size, update_policy=None):\n self.drifts = []\n self.score = calc_score_over_time(data[:, -1], np.array([0] + list(data[:-1, -1])), skip=train_size)\n return self.drifts, self.score\n\n\nif __name__ == '__main__':\n from detectors.base import detection_test\n print('update')\n detection_test(W_UPDATE)\n\n print('no update')\n detection_test(WO_UPDATE)\n\n print('1 moving average')\n detection_test(MA1)\n","sub_path":"src/detectors/baselines.py","file_name":"baselines.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"48520596","text":"from flask import Flask, render_template, request\nfrom flask_mysqldb import MySQL\nimport MySQLdb.cursors\n\napp = Flask(__name__)\n\n#code for mysql connection\napp.config['MYSQL_HOST'] = 'localhost' #hostname\napp.config['MYSQL_USER'] = 'root' #username\napp.config['MYSQL_PASSWORD'] = '' #password\napp.config['MYSQL_DB'] = 'Payment' #database name\n\nmysql = MySQL(app)\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"PaymentForm.html\")\n\n@app.route(\"/success\",methods=['POST'])\ndef success():\n\tif 'name' in request.form and 'gender' in request.form and 'address' in request.form and 'email' in request.form and 'pincode' in request.form and 'card' in request.form and 'cardNumber' in request.form and 'expiryDate' in request.form and 'cvv' in request.form:\n\t\tname = request.form['name']\n\t\tgender = request.form['gender']\n\t\taddress = request.form['address']\n\t\temail = request.form['email']\n\t\tpincode = request.form['pincode']\n\t\tcardType = 
request.form['card']\n\t\tcardNumber = request.form['cardNumber']\n\t\texpiryDate = request.form['expiryDate']\n\t\tcvv = request.form['cvv']\n\n\t\tcursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n\t\tcursor.execute('INSERT INTO details VALUES (NULL,%s,%s,%s,%s,%s,%s,SHA1(%s),%s,SHA1(%s))',(name,gender,address,email,pincode,cardType,cardNumber,expiryDate,cvv))\n\t\tmysql.connection.commit()\n\t\treturn render_template(\"complete.html\")\n\n\telse:\n\t\treturn ('Error! Payment Not completed!')\n","sub_path":"payment_from/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563093811","text":"def resolve():\n N = int(input())\n moves = []\n for i in range(N):\n l = list(map(int, input().split(\" \")))\n moves.append(l)\n \n now = [0,0,0]\n for i in moves:\n dt = i[0] - now[0]\n dx = i[1] - now[1]\n dy = i[2] - now[2]\n if dt < dx+dy:\n print(\"No\")\n return\n \n if not (dt % 2) == ((dx+dy) % 2):\n print(\"No\")\n return\n now = i\n \n print(\"Yes\")\n\nif '__main__' == __name__:\n resolve()","sub_path":"BeginnersSelection/ABC086C.py","file_name":"ABC086C.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"92454766","text":"import os\nfrom tempfile import mkstemp\nfrom pdb import set_trace as tr\n\ndef exec_cpptraj(topfile, script, delscriptfile=True):\n handle, scriptfile = mkstemp(prefix='junk', dir='/tmp')\n handle, stdoutputfile = mkstemp(prefix='junk', dir='/tmp')\n open(scriptfile,'w').write(script)\n if topfile:\n os.system('module load amber/amber14; cpptraj -p {0} -i {1} > {2}'.format(topfile, scriptfile,stdoutputfile))\n else:\n os.system('module load amber/amber14; cpptraj -i {0} > {2}'.format(scriptfile,stdoutputfile)) \n if delscriptfile:\n os.system('/bin/rm {0}'.format(scriptfile))\n stdoutput = open(stdoutputfile).readlines()\n os.system('/bin/rm {0}'.format(stdoutputfile))\n return scriptfile, stdoutput\n","sub_path":"python/amber/amber14/cpptraj/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"561107802","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n#Write by:En_dust\nimport requests\nfrom bs4 import BeautifulSoup\nimport sys\nreload(sys) \nsys.setdefaultencoding('utf-8')\n\ndef main(page):\n\ttry:\n\t\tr = requests.get(\"https://www.kuaidaili.com/free/inha/\"+ str(page) +\"/\")\n\t\traw = r.content\n\t\tsoup = \tBeautifulSoup(raw,'html.parser')\n\t\tstep1 = soup.find_all(\"td\")\n\t\tfor i in range(0,105,7):\n\t\t\tip = str(step1[i].string)\n\t\t\tport = str(step1[i+1].string)\n\t\t\tprint(ip + \":\" + port)\n\texcept requests.exceptions.ConnectionError:\n\t\tpass\n\n\n\nif __name__ == '__main__':\n\tset_page = sys.argv[1]\n\tfor page in range(1,int(set_page)+1):\n\t\tmain(page)\n","sub_path":"getproxy.py","file_name":"getproxy.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146125314","text":"\n'''\nA school decided to replace the desks in three classrooms. 
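# --- Editor's aside (worked example, not part of the original problem
# statement): desks cannot be shared across classrooms, so each class needs
# ceil(students / 2) desks; with integers, ceil(n / 2) == (n + 1) // 2:
for n in (20, 21, 22):
    print(n, 'students need', (n + 1) // 2, 'desks')  # -> 10, 11, 11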
Each desk sits two students.\nGiven the number of students in each class, print the smallest possible number of desks that can be purchased.\nThe program should read three integers: the number of students in each of the three classes, a, b and c respectively.\nIn the first test there are three groups. The first group needs 10 desks,\nand the second group has 21 students, so it needs no fewer than 11 desks.\n'''\n\nstudentsIn_Class1 = int(input('enter number of students in class 1'))\nstudentsIn_Class2 = int(input('enter number of students in class 2'))\nstudentsIn_Class3 = int(input('enter number of students in class 3'))\n# desks cannot be shared between classrooms, so round each class up\n# separately: for integers, ceil(n / 2) == (n + 1) // 2\nnumberOfDesks = ((studentsIn_Class1 + 1) // 2\n                 + (studentsIn_Class2 + 1) // 2\n                 + (studentsIn_Class3 + 1) // 2)\nprint(numberOfDesks, 'desks are required in total')\n\n","sub_path":"Python Sum/question 5.py","file_name":"question 5.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"289652229","text":"import random\r\n\r\nprint(\"fam\")\r\n\r\nok = False\r\n\r\nwhile not ok:\r\n\ttoGuess = random.randint(1, 100)\r\n\tpartitaFinita = False\r\n\ttentativi = 0\r\n\tok = False\r\n\took = False\r\n\r\n\twhile not partitaFinita:\r\n\t\tguess = int(input(\"guess\\n\"))\r\n\t\tif guess == toGuess:\r\n\t\t\tpartitaFinita = True\r\n\t\t\tprint(\"gg, it took you \" + str(tentativi) + \" attempts\")\r\n\t\t\t\r\n\t\t\twhile not ook:\r\n\t\t\t\tchoice = input(\"Play again? y/n\\n\")\r\n\t\t\t\tif choice == \"n\":\r\n\t\t\t\t\tok = True\r\n\t\t\t\t\took = True\r\n\t\t\t\telif choice != \"y\":\r\n\t\t\t\t\tprint(\"invalid input\")\r\n\t\t\t\telse:\r\n\t\t\t\t\took = True\r\n\t\telse:\r\n\t\t\ttentativi += 1\r\n\t\t\tif guess > toGuess:\r\n\t\t\t\tprint(\"lower\")\r\n\t\t\telse:\r\n\t\t\t\tprint(\"higher\")\r\n","sub_path":"Python/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"252023000","text":"from datetime import datetime\nfrom collections import namedtuple\nimport locale\nimport csv\nimport warnings\nfrom sys import exit\n\n# TODO:\n# 1. Make this file also function as a script.\n#    - specify a bank and input path + call bank2ynab\n# 2. Create a mapping from companies in the 'transaction' field to ynab payees\n# 3. User dialog window asking what the payee should be when no payee was\n#    found; save this choice to file.\n\n\n# ------- On YnabEntry -------\n# Categories will only import if the category already exists in your budget\n# file with the exact same name. Otherwise the categories will be ignored\n# when importing the file. 
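# --- Editor's aside (standalone illustration of the namedtuple pattern this
# module is built on; Row and src are hypothetical names): csv rows become
# attribute-addressable records via _make(), which is exactly how BankEntry
# and YnabEntry are used below.
import csv, io
from collections import namedtuple
Row = namedtuple('Row', 'date amount payee')
src = io.StringIO('2024-01-05,-12.50,Grocer\n')
for raw in csv.reader(src):
    row = Row._make(raw)
    print(row.date, row.amount, row.payee)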
Also, make sure that the categories are listed\n# with the master category, followed by a colon, then the sub category.\n# For example:\n# Everyday Expenses: Groceries\n#\n# Payee and category are currently always set to empty string\n\n\nclass Converter:\n __REQUIRED_HEADERS=('date', 'amount')\n\n def __init__(self, bankHeader, delimiter):\n for required_header in self.__REQUIRED_HEADERS:\n if required_header not in [bh.lower() for bh in bankHeader]:\n raise ValueError(f\"'{required_header}' is a required column, but\"\n f\" it's not in the bank's header:\\n {bankHeader}\")\n\n # Specified by YNAB4\n self.YNABHeader = ['Date', 'Payee', 'Category', 'Memo', 'Outflow', 'Inflow']\n self.YnabEntry = namedtuple('YnabEntry', ' '.join(self.YNABHeader).lower())\n\n self.BankEntry = namedtuple('BankEntry', ' '.join(bankHeader).lower())\n self.csvDelimiter = delimiter\n\n self.ignoredRows = []\n self.readRows = []\n self.parsedRows = []\n self.numEmptyRows = 0\n\n def convert(self, inputPath, toIgnore=[]):\n if not inputPath:\n return\n\n # Attempt to parse input file to a YNAB-formatted csv file\n # May raise OSError\n bankData = self.readInput(inputPath, toIgnore)\n parsed = self.parseRows(bankData)\n\n return self.writeOutput(parsed)\n\n def readInput(self, inputPath, toIgnore):\n with open(inputPath, encoding='utf-8', newline='') as inputFile:\n reader = csv.reader(inputFile, delimiter=self.csvDelimiter)\n try:\n for row in reader:\n if reader.line_num == 1: # Skip first row (header)\n continue\n\n if (row and len(row) != namedtupleLen(self.BankEntry)):\n msg = f' expected row length {namedtupleLen(self.BankEntry)},' \\\n f' but got {len(row)} ({row})'\n warnings.warn(badFormatWarn(msg), RuntimeWarning)\n elif row:\n bankRow = self.BankEntry._make(row)\n if 'payee' in bankRow._fields:\n for i in toIgnore:\n if i not in bankRow.payee:\n self.readRows.append(bankRow)\n else:\n self.ignoredRows.append(bankRow)\n else:\n self.readRows.append(bankRow)\n else:\n warnings.warn(\n f'\\n\\tSkipping row {reader.line_num}: {row}',\n RuntimeWarning)\n self.numEmptyRows += 1\n except csv.Error as e:\n raise OSError(f'file {inputFile}\\n line {row}: {e}')\n else:\n print('{0}/{1} line(s) successfully read '\n '(ignored {2} blank line(s) and '\n '{3} transactions found in accignore).'\n .format(len(self.readRows), reader.line_num-1, self.numEmptyRows, len(self.ignoredRows)))\n\n return self.readRows\n\n\n def readOptionalField(self, lambdaFun, alt=''):\n try:\n res = lambdaFun()\n except AttributeError:\n res = alt\n return res\n\n def parseRows(self, bankRows):\n for row in bankRows:\n try:\n self.parsedRows.append(self.parseRow(row))\n except (ValueError, TypeError) as e:\n msg = f'\\n\\t{row}\\n\\tError: {e}'\n warnings.warn(badFormatWarn(msg), RuntimeWarning)\n\n print(f'{len(self.parsedRows)}/{len(bankRows)} line(s) successfully parsed ')\n\n return self.parsedRows\n\n def parseRow(self, bankline):\n if type(bankline) is not self.BankEntry:\n raise TypeError(f'{bankline} is not a line from the bank\\'s csv-file')\n\n # payee and memo are not a mandatory fields; set only if they exist\n payee = self.readOptionalField(lambda: bankline.payee)\n memo = self.readOptionalField(lambda: bankline.memo)\n\n strAmount = bankline.amount.strip()\n amountSign = '-' if strAmount[0] == '-' else '+'\n\n bankInflow = strAmount if amountSign == '+' else ''\n bankOutflow = strAmount[1::] if amountSign == '-' else ''\n\n date = datetime.strptime(bankline.date, '%Y-%m-%d') #convert to date\n dateStr = date.strftime('%Y/%m/%d') 
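# --- Editor's aside: a standalone trace of parseRow's amount and date
# handling above (illustrative values only). A leading '-' routes the figure
# to Outflow, otherwise to Inflow, and ISO dates become YNAB's Y/m/d form:
from datetime import datetime
amt = '-45.00'
outflow = amt[1:] if amt.startswith('-') else ''
inflow = '' if amt.startswith('-') else amt
print(outflow, inflow)  # -> 45.00
print(datetime.strptime('2024-03-09', '%Y-%m-%d').strftime('%Y/%m/%d'))  # -> 2024/03/09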
# desired format\n\n return self.YnabEntry(date=dateStr, payee=payee, category='',\n memo=memo, outflow=bankOutflow,\n inflow = bankInflow)\n\n def writeOutput(self, parsedRows):\n hasWritten = False\n\n if(parsedRows == None or len(parsedRows) == 0):\n return hasWritten\n\n with open('ynabImport.csv', 'w', encoding='utf-8', newline='') as outputFile:\n writer = csv.writer(outputFile)\n try:\n writer.writerow(self.YNABHeader)\n writer.writerows(parsedRows)\n hasWritten = True\n print('YNAB csv-file successfully written.')\n except csv.Error as e:\n raise OSError(f'File {outputFile}, line {writer.line_num}: {e}')\n finally:\n return hasWritten\n\n\ndef namedtupleLen(tupleArg):\n return len(tupleArg._fields)\n\n\ndef readIgnore():\n accounts = []\n try:\n with open('accignore.txt', encoding='utf-8', newline='') as ignored:\n for account in ignored:\n accounts.append(account)\n msg = f'Ignoring transactions from account(s): {accounts}'\n except OSError:\n msg = 'Parsing all transactions...'\n print(msg)\n return accounts\n\n\ndef badFormatWarn(entry):\n return f'\\n\\tIncorrectly formated row:{entry}\\n\\t Skipping...'\n\n\ndef bank2ynab(bank, csvFilePath):\n \"\"\" Perform the conversion from a bank csv-file to YNAB's csv format\n \"\"\"\n converter = Converter(bank.header, bank.delimiter)\n\n # Check for accignore.txt and obtain a list ofignored accounts.\n try:\n ignoredAccounts = readIgnore()\n except OSError:\n ignoredAccounts = [] # It's okay to not have it.\n\n # Do the conversion:\n # fetch file, attempt parsing, write output, and return results.\n hasConverted = converter.convert(csvFilePath, ignoredAccounts)\n return (hasConverted, converter.numEmptyRows, len(converter.ignoredRows),\n len(converter.readRows), len(converter.parsedRows))\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"493283661","text":"import matplotlib.pyplot as plt\n\nslices = [7, 2, 2, 5, 13]\nlabels = [\"a\", \"b\", \"c\", \"d\", \"e\"]\n\n# slices = data\n# labels = names for values\n# autopct = show percentage\n# explode = drag a piece out\nplt.pie(slices, labels=labels, colors=['red', 'blue', 'green', 'white', 'yellow'], shadow=True, startangle=0,\n counterclock=False,\n autopct='%1.1f%%', explode=[0, 0, 0.1, 0, 0])\n\nplt.show()\n","sub_path":"Practice 6.py","file_name":"Practice 6.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"389659928","text":"#!/usr/bin/env python3\nimport argparse\nfrom lark import Lark\n\ndef parse(source):\n parser = Lark(\"\"\"\n\n \"\"\")\n\n ast = parser.parse(source)\n idl = {}\n return idl\n\ndef c_generator(idl):\n text = \"\"\n text += f\"#ifndef __IDL_{idl['name']}_H__\\n\"\n text += f\"#define __IDL_{idl['name']}_H__\\n\"\n text += \"#endif\\n\"\n with open(args.out, \"w\") as f:\n f.write(generate(text))\n\ndef main():\n parser = argparse.ArgumentParser(description=\"The IDL stub generator.\")\n parser.add_argument(\"--idl\", required=True, help=\"The IDL file.\")\n parser.add_argument(\"--lang\", choices=[\"c\"], default=\"c\",\n help=\"The output language.\")\n parser.add_argument(\"-o\", dest=\"out\", required=True,\n help=\"The output directory.\")\n args = parser.parse_args()\n\n with open(args.idl) as f:\n idl = parse(f.read())\n\n if args.lang == \"c\":\n c_generator(idl)\n\nif __name__ == \"__main__\":\n 
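    # Editor's note (hedged sketch, not the original code): as written,
    # c_generator reads `args` from main()'s scope and calls an undefined
    # generate(); a corrected version would take the output path as a
    # parameter and write the assembled text directly:
    #   def c_generator(idl, out_path):
    #       text = ...  # assemble the header-guard lines as above
    #       with open(out_path, 'w') as f:
    #           f.write(text)
    # and main() would call c_generator(idl, args.out).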
main()\n","sub_path":"tools/idlstub.py","file_name":"idlstub.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"436586028","text":"from importlib import import_module\nfrom sys import argv\n\nfrom telethon.errors.rpcerrorlist import PhoneNumberInvalidError\nfrom Kora import bot\nfrom Kora.modules import ALL_MODULES\n\nINVALID_PH = '\\nERROR: The Phone No. entered is INVALID' \\\n '\\n Tip: Use Country Code along with number.' \\\n '\\n or check your phone number and try again !'\n\ntry:\n bot.start()\nexcept PhoneNumberInvalidError:\n print(INVALID_PH)\n exit(1)\n\nfor module_name in ALL_MODULES:\n imported_module = import_module(\"Kora.modules.\" + module_name)\n\n\nif len(argv) not in (1, 3, 4):\n bot.disconnect()\nelse:\n print(\"bot running now...\")\n bot.run_until_disconnected()","sub_path":"Kora/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"217133483","text":"#!/usr/bin/env python\r\nimport numpy as np\r\nfrom Util import *\r\nfrom Cl import *\r\nfrom Log import *\r\nfrom ClMod import *\r\nfrom Camera import *\r\nfrom GUI import *\r\nfrom PyQt4.QtGui import QMouseEvent\r\n\r\nclass Cube:\r\n\tdef __init__ (self):\r\n\t\tlog.logItem(\"Cube.__init__\")\r\n\t\t\r\n\t\tself.wMtx = np.array([\r\n\t\t\t1, 0, 0, 0,\r\n\t\t\t0, 1, 0, 0,\r\n\t\t\t0, 0, 1, 0,\r\n\t\t\t0, 0, 0, 1\r\n\t\t], dtype=np.float32)\r\n\t\tself.wMtx = self.wMtx.reshape(4,4)\r\n\t\t\r\n\t\tself.cubelets = []\r\n\t\tself.cubeletmodel = None\r\n\t\t\r\n\t\tself.cam = None\r\n\t\t\r\n\t\tself.curSlice = None\r\n\t\tself.sliceRotation = 0.\r\n\t\tself.sliceForward = True\r\n\t\tself.sliceRotRate = 0.\r\n\t\tself.defaultRotRate = 720.\r\n\t\tself.sliceRotAxis = [1,0,0]\r\n\t\tself.runRandomSliceRot = False\r\n\t\tself.randomRots = 60\r\n\t\tself.mouseSeq = []\r\n\t\t\r\n\t\t#weird spin cube effect I just thought of\r\n\t\tself.spinCube = False\r\n\t\tself.spinRate = 10.\r\n\t\t\r\n\t\tself.scaleCube = False\r\n\t\tself.scaleCubeExtent = .1\r\n\t\tself.scaleCubeOffset = 1.1\r\n\t\tself.thetaRate = np.pi / 90.\r\n\t\tself.theta = 0.\r\n\t\t\r\n\t\tself.gui = None\r\n\t\t\r\n\tdef setPosAndOrientation(self,args):\r\n\t\tposW = args[0:3]\r\n\t\trightW = args[3:6]\r\n\t\tupW = args[6:9]\r\n\t\tlookW = args[9:12]\r\n\t\t\r\n\t\twm = []\r\n\t\tfor i in range(0,3):\r\n\t\t\twm.extend([rightW[i], upW[i], lookW[i], 0.])\r\n\t\t\r\n\t\twm.extend(posW)\r\n\t\twm.append(1.)\r\n\t\t\r\n\t\twm = np.array(wm, dtype=np.float32)\r\n\t\tself.wMtx = wm.reshape(4,4)\r\n\t\t\r\n\tdef initialize (self):\r\n\t\tlog.logItem(\"Cube.initialize\")\r\n\t\tself.cubeletmodel = CubeletModel()\r\n\t\tself.cubeletmodel.initialize()\r\n\t\t\r\n\t\tself.cam = OrbitalCamera()\r\n\t\t\r\n\t\tself._makeCubelets()\r\n\t\t\r\n\t\tself.gui = GUI()\r\n\t\tself.gui.initialize(self)\r\n\t\t\r\n\tdef resize(self,width,height):\r\n\t\tlog.logItem(\"Cube.resize\")\r\n\t\t#self.cam.setOrthoProjMtx(-5.,5.,-5.,5.,-10.,10.)\r\n\t\tself.cam.setPerspectiveMtx(200., float(width) / float(height), 0.05, 1000.0)\r\n\t\tself.gui.resize(width,height)\r\n\t\r\n\tdef update (self,dt):\r\n\t\tself.handleSliceRotation(dt)\r\n\t\t\r\n\t\tself.gui.update(dt)\r\n\t\t\r\n\t\tif self.curSlice == None and self.runRandomSliceRot == True:\r\n\t\t\tself.randomRots = self.randomRots - 1\r\n\t\t\tsid = np.random.randint(0,6)\r\n\t\t\tforward = 
bool(np.random.randint(0,2))\r\n\t\t\tself.startSliceRotation(sid,forward)\r\n\t\t\t\r\n\t\t\tif self.randomRots == 0:\r\n\t\t\t\tself.runRandomSliceRot = False\r\n\t\t\t\r\n\t\tif self.spinCube == True:\r\n\t\t\tangle = self.spinRate * dt\r\n\t\t\trot = np.dot(Mat4RotationX(angle),Mat4RotationY(angle))\r\n\t\t\trot = np.dot(rot, Mat4RotationZ(angle))\r\n\t\t\tself.wMtx = np.dot(self.wMtx,rot)\r\n\t\t\t\r\n\t\tif self.scaleCube == True:\r\n\t\t\tself.theta = self.theta + (dt * self.thetaRate)\r\n\t\t\twhile self.theta > np.pi * 2.:\r\n\t\t\t\tself.theta = self.theta - (np.pi * 2.)\r\n\t\t\t\t\r\n\t\t\ts = (np.sin(self.theta) * self.scaleCubeExtent) + self.scaleCubeOffset\r\n\t\t\ts = Mat4Scaling([s,s,s])\r\n\t\t\tself.wMtx = np.dot(s, self.wMtx)\r\n\t\t\r\n\tdef render (self):\r\n\t\tself.gui.render()\r\n\t\t\r\n\t\tfor cubelet in self.cubelets:\r\n\t\t\t#draw the damn cubes\r\n\t\t\tself.cubeletmodel.Draw(self.wMtx, cubelet, self.cam)\r\n\t\t\r\n\t\t#pass\r\n\t\t\t\r\n\tdef handleSliceRotation (self, dt):\r\n\t\tif self.curSlice is None: return\r\n\t\t\r\n\t\tif (self.sliceRotation == .0):\r\n\t\t\tself.curSlice = None\r\n\t\t\treturn\r\n\t\t\r\n\t\trot = None\r\n\t\tangle = self.sliceRotRate * dt\r\n\t\tif (self.sliceRotation < angle):\r\n\t\t\tangle = self.sliceRotation\r\n\t\t\tself.sliceRotation = 0.\r\n\t\telse:\r\n\t\t\tself.sliceRotation = self.sliceRotation - angle\r\n\t\tif self.sliceForward == False:\r\n\t\t\tangle = angle * -1.\r\n\t\t\r\n\t\t#if self.sliceRotAxis == 'x':\r\n\t\t\t#rot = Mat4RotationX(angle)\r\n\t\t#if self.sliceRotAxis == 'y':\r\n\t\t\t#rot = Mat4RotationY(angle)\r\n\t\t#if self.sliceRotAxis == 'z':\r\n\t\t\t#rot = Mat4RotationZ(angle)\r\n\t\trot = Mat4RotAxis(self.sliceRotAxis,angle)\r\n\t\t\t\r\n\t\t#if rot == None: raise Exception(\"Undefined condition: unknown axis!\")\r\n\t\t\r\n\t\tfor cubelet in self.curSlice:\r\n\t\t\tcubelet.transform(rot)\r\n\t\t\t\r\n\tdef startSliceRotation(self, sid, forward):\r\n\t\tif self.curSlice is not None: return\r\n\t\tself.curSlice = []\r\n\t\t\r\n\t\tavm = self.cam.getAdjustedViewMtx()\r\n\t\tiavm = np.linalg.inv(avm);\r\n\t\t\r\n\t\tfor cubelet in self.cubelets:\r\n\t\t\tposW = cubelet.posW[:]\r\n\t\t\tposW.append(1.0)\r\n\t\t\tposW = np.array(posW,dtype=np.float32).reshape(4)\r\n\t\t\tposAVS = np.dot(posW,avm)\r\n\t\t\t\r\n\t\t\tif sid == 0 and posAVS[0] < -1.0:\r\n\t\t\t\tself.curSlice.append(cubelet)\r\n\t\t\t\t\r\n\t\t\telif sid == 1 and posAVS[0] > 1.0:\r\n\t\t\t\tself.curSlice.append(cubelet)\r\n\t\t\t\t\r\n\t\t\telif sid == 2 and posAVS[1] < -1.0:\r\n\t\t\t\tself.curSlice.append(cubelet)\r\n\t\t\t\t\r\n\t\t\telif sid == 3 and posAVS[1] > 1.0:\r\n\t\t\t\tself.curSlice.append(cubelet)\r\n\t\t\t\t\r\n\t\t\telif sid == 4 and posAVS[2] < -1.0:\r\n\t\t\t\tself.curSlice.append(cubelet)\r\n\t\t\t\t\r\n\t\t\telif sid == 5 and posAVS[2] > 1.0:\r\n\t\t\t\tself.curSlice.append(cubelet)\r\n\t\t\r\n\t\t#this is a little dirty ... 
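# --- Editor's aside (hypothetical refactor, in the spirit of the author's
# comment here about a "hard coded array of arrays"): the sid -> axis chain
# below could become a module-level lookup table:
import numpy as np
SLICE_AXES = np.array([
    [-1, 0, 0, 0], [1, 0, 0, 0],
    [0, -1, 0, 0], [0, 1, 0, 0],
    [0, 0, -1, 0], [0, 0, 1, 0],
], dtype=np.float32)
# after which startSliceRotation would reduce to:
#   self.sliceRotAxis = SLICE_AXES[sid]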
think on this:\r\n\t\t#maybe replace this with a hard coded array of arrays?\r\n\t\tif sid == 0:\r\n\t\t\tself.sliceRotAxis = np.array([-1,0,0,0],dtype=np.float32)\r\n\t\t\t\r\n\t\telif sid == 1:\r\n\t\t\tself.sliceRotAxis = np.array([1,0,0,0],dtype=np.float32)\r\n\t\t\t\r\n\t\telif sid == 2:\r\n\t\t\tself.sliceRotAxis = np.array([0,-1,0,0],dtype=np.float32)\r\n\t\t\t\r\n\t\telif sid == 3:\r\n\t\t\tself.sliceRotAxis = np.array([0,1,0,0],dtype=np.float32)\r\n\t\t\t\r\n\t\telif sid == 4:\r\n\t\t\tself.sliceRotAxis = np.array([0,0,-1,0],dtype=np.float32)\r\n\t\t\t\r\n\t\telif sid == 5:\r\n\t\t\tself.sliceRotAxis = np.array([0,0,1,0],dtype=np.float32)\r\n\t\t\t\t\r\n\t\tif len(self.curSlice) != 9:\r\n\t\t\traise Exception(\"Undefined condition: slice doesn't have 9 cubelets!\")\r\n\t\t\t\t\t\r\n\t\tself.sliceRotation = 90.0\r\n\t\tself.sliceForward = forward\r\n\t\tself.sliceRotRate = self.defaultRotRate\r\n\t\tself.sliceRotAxis = np.dot(self.sliceRotAxis,iavm)\r\n\t\t\r\n\tdef startCubeInversion (self,sid,forward):\r\n\t\tif self.curSlice != None: return\r\n\t\tself.curSlice = []\r\n\t\t\r\n\t\tavm = self.cam.getAdjustedViewMtx()\r\n\t\tiavm = np.linalg.inv(avm)\r\n\t\t\r\n\t\tfor cubelet in self.cubelets:\r\n\t\t\tself.curSlice.append(cubelet)\r\n\t\t\t\r\n\t\tself.sliceRotAxis = np.array([1.0,0.0,0.0,0.0], dtype=np.float32).reshape(4)\r\n\t\t\r\n\t\tself.sliceRotation = 180.0\r\n\t\tself.sliceForward = True\r\n\t\tself.sliceRotRate = self.defaultRotRate\r\n\t\tself.sliceRotAxis = np.dot(self.sliceRotAxis,iavm)\r\n\t\t\r\n\tdef onMouseUp (self, eve):\r\n\t\t#if eve.button() == 2 and len(self.mouseSeq) > 0:\r\n\t\t\t#self.startSliceRotation(self.mouseSeq.pop(),True)\r\n\t\t\r\n\t\tself.cam.onMouseUp(eve)\r\n\t\tself.gui.onMouseUp(eve)\r\n\t\t\r\n\t\t#might want to test location decide to either pass\r\n\t\t#the event to the gui or to the camera based on location\r\n\t\t\r\n\tdef onMouseDown (self, event):\r\n\t\tself.cam.onMouseDown(event)\r\n\t\tself.gui.onMouseDown(event)\r\n\t\t\r\n\tdef onMouseMove (self, event):\r\n\t\tself.cam.onMouseMove(event)\r\n\t\tself.gui.onMouseMove(event)\r\n\t\t\r\n\tdef onZoomIn (self):\r\n\t\tself.cam.zoom(-1.0)\r\n\t\t\r\n\tdef onZoomOut (self):\r\n\t\tself.cam.zoom(1.0)\r\n\t\t\r\n\tdef cleanup (self):\r\n\t\tlog.logItem(\"Cube.cleanup\")\r\n\t\tself.cubeletmodel.cleanup()\r\n\t\tself.gui.cleanup()\r\n\t\t\r\n\tdef _makeCubelets(self):\t\r\n\t\tlog.logItem(\"Cube._makeCubelets\")\r\n\t\t\r\n\t\toffset = 2.2\r\n\t\t\r\n\t\tfor y in range (0,3):\r\n\t\t\tfor x in range (0,3):\r\n\t\t\t\tfor z in range(0,3):\r\n\t\t\t\t\tif y == 1 and z == 1 and x == 1: continue\r\n\t\t\t\t\t\r\n\t\t\t\t\tcurCL = Cubelet()\r\n\t\t\t\t\tcurCL.setPosAndOrientation([\r\n\t\t\t\t\t\tfloat(x-1) * offset, float(y-1) * offset, float(z-1) * offset,\r\n\t\t\t\t\t\t1., 0., 0.,\r\n\t\t\t\t\t\t0., 1., 0.,\r\n\t\t\t\t\t\t0., 0., 1.\r\n\t\t\t\t\t])\r\n\t\t\t\t\t\r\n\t\t\t\t\tcurCL.addSticker(0)\r\n\t\t\t\t\t#for i in range(0,7):\r\n\t\t\t\t\t\t#curCL.addSticker(i)\r\n\t\t\t\t\t\t\r\n\t\t\t\t\tself.cubelets.append(curCL)\r\n\t\t\t\t\t\r\n\t\tfor cubelet in self.cubelets:\r\n\t\t\t\tif cubelet.posW[0] > 1.0:\r\n\t\t\t\t\tcubelet.addSticker(1)\r\n\t\t\t\t\r\n\t\t\t\tif cubelet.posW[0] < -1.0:\r\n\t\t\t\t\tcubelet.addSticker(2)\r\n\t\t\t\t\t\r\n\t\t\t\tif cubelet.posW[2] > 1.0:\r\n\t\t\t\t\tcubelet.addSticker(4)\r\n\t\t\t\t\t\r\n\t\t\t\tif cubelet.posW[2] < -1.0:\r\n\t\t\t\t\tcubelet.addSticker(3)\r\n\t\t\t\t\t\r\n\t\t\t\tif cubelet.posW[1] > 
1.0:\r\n\t\t\t\t\tcubelet.addSticker(5)\r\n\t\t\t\t\t\r\n\t\t\t\tif cubelet.posW[1] < -1.0:\r\n\t\t\t\t\tcubelet.addSticker(6)","sub_path":"Cube.py","file_name":"Cube.py","file_ext":"py","file_size_in_byte":7627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89675894","text":"from multiprocessing import Pool, cpu_count\r\nimport time as t\r\nimport numpy as np\r\n#import os\r\n#%%\r\ndef Rand_Func(args):\r\n (arg1,n) = args\r\n return(np.random.uniform(0,arg1,n))\r\n\r\nif __name__ == \"__main__\":\r\n start_time=t.time()\r\n print(\"Pool Test\")\r\n print()\r\n n=10000000\r\n num_process = max(cpu_count()-1,2)\r\n p=Pool(num_process)\r\n c_arr=[4,2,2,4,2]\r\n args=[(c,n) for c in c_arr]\r\n result=p.map(Rand_Func,args)\r\n\r\n #%%\r\n def Func(a,b,c,d,e):\r\n result=50+(1*a+0*b+0*c+0*d+4*e)\r\n return(result)\r\n\r\n Volume=((4-0)*(2-0)*(2-0)*(4-0)*(2-0))\r\n A=Func(result[0],result[1],result[2],result[3],result[4])\r\n B=np.sum(A)\r\n C=B/n\r\n D=C*Volume\r\n print(D)\r\n p.close()\r\n p.terminate()\r\n p.join()\r\n #%%\r\n end_time=t.time()\r\n elapsed_time=end_time-start_time\r\n print(elapsed_time)\r\n# os.system(\"pause\")\r\n","sub_path":"MonteCarloIntegratioMultiprocessingPoolPython.py","file_name":"MonteCarloIntegratioMultiprocessingPoolPython.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"199339866","text":"t = int(input())\nb = int(input())\narr = []\nfor i in range (b):\n s,d = map(int,input().split())\n arr.append((s,d))\nPI = 3.141592654\narr.sort(key = lambda x: x[1])\ntotal = 0\nfor i in arr:\n c = i[1]*2*PI\n if t >= i[0] * c:\n total += i[0]\n t -= i[0]*c\n else:\n total += t//c\n break\nprint(int(total))","sub_path":"ICPC/HCMUS-ICPC-2020/src/K.py","file_name":"K.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"439546","text":"def main():\n python = ('python',1991)\n ruby = ('ruby',1995)\n go = ('go',2009)\n\n programming = python, ruby, go\n\n for lang in programming:\n name, year = lang\n print(name, year, ' birth')\n\nif __name__ == '__main__':\n main()","sub_path":"taple/unpack_for.py","file_name":"unpack_for.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197405440","text":"from netsuite.utils import obj\n\ntest_data = {\n 'line_items': [\n\t\t{'internal_id': 165, 'quantity': 1}\n\t],\n\t'first_name': 'Joe',\n\t'last_name': 'Bloggs',\n\t'phone_number': '777777777',\n\t'email': 'fmalina@gmail.com',\n\t'password': 'ovcaaaa',\n\t'marketing_agreement': True,\n 'shipping_address': {\n\t 'address_line_1': '777 Green Avenue',\n\t 'address_line_2': 'Spring Hill',\n\t 'city': 'Springfield',\n\t 'region': 'OR',\n\t 'zip_code': '12121',\n\t 'country': 'United States'\n },\n\t'credit_card_number': '4444333322221111',\n\t'credit_card_owner': 'J. 
Bloggs',\n\t'expiration_date_month': '1',\n\t'expiration_date_year': '2018',\n\t'cvc2': '333',\n\t'shipping_cost': 7.99\n}\ntest_data['billing_address'] = test_data['shipping_address']\n\ndata = obj(test_data)\n\n\ndef prepare_customer_data(data):\n return {\n 'firstName': data.first_name,\n 'lastName': data.last_name,\n 'phone': data.phone_number,\n 'email': data.email\n }\n\n\ndef prepare_address(addressee, address):\n return {\n 'addressee': addressee,\n 'addr1': address.address_line_1,\n 'addr2': address.address_line_2,\n 'state': address.region,\n 'city': address.city,\n 'zip': address.zip_code.upper(),\n # 'country': '_unitedStates'\n }\n","sub_path":"netsuite/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"630713646","text":"#!/usr/bin/env python\n\n\"\"\"\nbat.py\nAuthor: Samuel Love\nYear: 2020\n\nSubscribes to battery topic and prints a calculated\nbattery percentage to the console window\n\"\"\"\n\nimport rospy\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import BatteryState\n\ndef callback(data):\n # Extract voltage from BatteryState variable\n print(\"Battery level is: %f volts\" % (data.voltage))\n # Calculate battery percentage from voltage\n batteryPercent = ((data.voltage - 9)/3.6)*100\n print(\"Battery percentage is: %f percent\" % (batteryPercent))\n\ndef listener():\n batterystate = BatteryState()\n rospy.init_node('batpercent', anonymous=True)\n rospy.Subscriber('battery', BatteryState, callback)\n rospy.spin() # Keep rospy node running until input is recieved\n\nif __name__ == '__main__':\n listener()\n","sub_path":"scripts/bat.py","file_name":"bat.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24078875","text":"import cPickle as pickle\nfrom matplotlib.patches import Polygon\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\ndef mask_distill(mask, im):\n '''\n :param mask: points corrdinates generated by mask r-cnn to mask a child\n :param im: original image where there is a child\n :return: the original image without the child\n '''\n\n # step 1 : transfer the original mask into x, y corrdinates\n mask = np.concatenate(mask).reshape(-1,2)\n mask1 = np.zeros(shape = (im.shape[0], im.shape[1]))\n mask1[mask[:,1], mask[:,0]] = 1\n #plt.figure(figsize=(10,10))\n #plt.imshow(mask1)\n\n # step 2 : use diff to eliminate the point of mask which is close to each other\n mask_diff = mask1 - np.hstack([np.zeros(shape=(mask1.shape[0],1)), mask1[:,:-1]])\n\n mask_diff[mask_diff == -1] = 0\n plt.imshow(mask_diff)\n mask1_cumsum = mask_diff.cumsum(1)\n mask1_cumsum[mask1_cumsum %2 == 0] = 0\n mask1_cumsum[mask1_cumsum != 0] = 1\n\n # step 3 : 消除奇数mask造成的异常\n bandwidth = 2\n mask_up = np.vstack([np.zeros(shape=(bandwidth, mask1_cumsum.shape[1])), mask1_cumsum[:-bandwidth, :]])\n mask_down = np.vstack([mask1_cumsum[bandwidth:, :], np.zeros(shape=(bandwidth, mask1_cumsum.shape[1]))])\n flag = (1 - mask_down == mask1_cumsum) * (1 - mask_up == mask1_cumsum)\n mask1_cumsum[flag] = 1 - mask1_cumsum[flag]\n plt.imshow(mask1_cumsum)\n\n # # dilation and erode\n # kernel = np.ones((50,50),np.uint8)\n # mask_for_origin_cumsum = cv2.dilate(mask_for_origin_cumsum,kernel,iterations = 1)\n # mask_for_origin_cumsum = cv2.erode(mask_for_origin_cumsum,kernel,iterations = 1)\n # # plt.show()\n # mask_for_origin_cumsum.shape\n\n 
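    # Editor's aside: the per-channel loop below can also be written as a
    # single broadcasted multiply (same pixels, though the result is float
    # rather than uint8):
    #   imshow = im * (1 - mask1_cumsum)[..., None]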
imshow = im.copy()\n    for layer in range(3):\n        imshow[...,layer] = imshow[:,:,layer]*(1 - mask1_cumsum)\n    # plt.imshow(imshow)\n    plt.imsave('bg_only.jpg', imshow)  # plt.imsave takes the filename first\n\n\n\ndef main():\n    res_dict = []\n    with open('/Users/pengyuyan/Desktop/result.pkl', 'rb') as d:\n        res_dict = pickle.load(d)\n\n    os.chdir('/Users/pengyuyan/Desktop/images')\n\n    thresh = 0.7\n    box_alpha = 0.5\n    im = cv2.imread('1504174816738-1244746321.jpg')\n    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n    im_info = res_dict[0]\n\n    dpi = 200\n    fig = plt.figure(frameon=False)\n    fig.set_size_inches(im.shape[1] / dpi, im.shape[0] / dpi)\n    ax = plt.Axes(fig, [0., 0., 1., 1.])\n    # ax.axis('off')\n    fig.add_axes(ax)\n    # ax.imshow(im, cmap='gray')\n    ax.imshow(im)\n\n    mask = im_info.values()[0][0]['mask']\n    # contour = res_dict[0]['1504174816738-1244746321'][0]['mask']\n\n    # for c in contour:\n    #     polygon = Polygon(\n    #         c.reshape((-1, 2)),\n    #         fill=True, facecolor='r',\n    #         edgecolor='w', linewidth=1.2,\n    #         alpha=0.5)\n    #     ax.add_patch(polygon)\n\n    mask_distill(mask, im)\n\n\n","sub_path":"tools/mask_distillation.py","file_name":"mask_distillation.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115062110","text":"# -*-coding:utf-8-*-\nimport re\nimport urllib.parse\nfrom bs4 import BeautifulSoup\n\n\nclass HTMLParser:\n    def parse_class_id_urls(self, base_url, html_content):\n        'Parse the HTML text for class URLs'\n        if html_content is not None:\n            # build a BeautifulSoup object from the HTML text\n            soup = BeautifulSoup(\n                html_content, 'html.parser')\n            class_id_url_set = self._get_class_urls(base_url, soup)\n            return class_id_url_set\n        else:\n            return None\n\n    def parse_stu_info(self, html_content):\n        'Parse the HTML text for student info'\n        if html_content is not None:\n            # build a BeautifulSoup object from the HTML text\n            soup = BeautifulSoup(\n                html_content, 'html.parser')\n            stu_info_list = self._get_stu_info(soup)\n            return stu_info_list\n        else:\n            return None\n\n    def _get_class_urls(self, base_url, soup):\n        'Extract the unprocessed class URLs'\n        class_id_url_set = set()\n        class_id_urls = soup.find_all('a')\n        for class_id_url in class_id_urls:\n            url = class_id_url.get('href')\n            full_url = urllib.parse.urljoin(base_url, url)\n            class_id_url_set.add(full_url)\n        return class_id_url_set\n\n    def _get_stu_info(self, soup):\n        'Extract the unprocessed student info'\n        stu_info_list = list()\n        stu_info_dict = dict()\n        stu_infos = soup.find_all('td')\n        # the first 11 cells are the column headers; the remaining cells\n        # repeat those headers in groups of 11, one group per student\n        flag = 0\n        for stu_info in stu_infos[11:-1]:\n            stu_info_dict[stu_infos[flag].string] = stu_info.string\n            if flag == 10:\n                flag = 0\n                stu_info_list.append(stu_info_dict.copy())\n                stu_info_dict.clear()\n            else:\n                flag = flag + 1\n        return 
stu_info_list\n","sub_path":"CQUPT_Stu_Crawler/html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"272132829","text":"from typing import List\n\n\nclass Environment:\n def __init__(self, dimension: int = 0):\n self.dimension = dimension\n\n\nclass Material:\n class Kind:\n STATIC_TEXTURE_AND_PARAMS = \"static_static\"\n STATIC_TEXTURE_DYNAMIC_PARAMS = \"static_dynamic\"\n DYNAMIC_TEXTURE_AND_PARAMS = \"dynamic_dynamic\"\n\n class Texture:\n RANDOM = \"random\"\n GOLD = \"gold\"\n MARBLE = \"marble\"\n CRYSTAL = \"crystal\"\n WOOD = \"wood\"\n\n def __init__(self,\n kind: str = \"\",\n texture: str = \"\",\n metallic: float = .0,\n specular: float = .0,\n roughness: float = .0\n ):\n self.kind = kind\n self.metallic = metallic\n self.specular = specular\n self.roughness = roughness\n self.texture = texture\n\n def static_texture_and_params(self,\n texture: str = \"\",\n metallic: float = .0,\n specular: float = .0,\n roughness: float = .0):\n self.kind = Material.Kind.STATIC_TEXTURE_AND_PARAMS\n\n self.texture = texture\n self.metallic = metallic\n self.specular = specular\n self.roughness = roughness\n\n return self\n\n def static_texture_dynamic_params(self, texture):\n self.kind = Material.Kind.STATIC_TEXTURE_DYNAMIC_PARAMS\n self.texture = texture\n\n return self\n\n def dynamic_texture_and_params(self):\n self.kind = Material.Kind.DYNAMIC_TEXTURE_AND_PARAMS\n self.texture = Material.Texture.RANDOM\n\n return self\n\n\nclass Object:\n def __init__(self,\n name: str,\n path: str,\n material: Material,\n normalize: bool = True):\n self.name = name\n self.path = path\n self.normalize = normalize\n self.material = material\n\n\nclass Light:\n class Kind:\n STATIC_LIGHT = \"static\" # no se mueve, tiene un color fijo\n DYNAMIC_LIGHT = \"dynamic\" # se mueve, tiene un color fijo\n RAINBOW_STATIC_LIGHT = \"rainbow_static_light\" # no se mueve, cambia de color\n RAINBOW_DYNAMIC_LIGHT = \"rainbow_dynamic_light\" # se mueve, cambia de color\n\n def __init__(self,\n kind: str = \"\",\n color: list = None,\n location: list = None,\n max_range: int = 0,\n max_energy: int = 10\n ):\n self.kind = kind\n self.color = color\n self.location = location\n self.max_range = max_range\n self.max_energy = max_energy\n\n def dynamic_light(self, color: list, max_range: int):\n assert len(color) == 3, \"color must have lenght of 3\"\n assert 0.0 <= color[0] <= 1.0, \"r-channel must be between 0 and 1\"\n assert 0.0 <= color[1] <= 1.0, \"g-channel must be between 0 and 1\"\n assert 0.0 <= color[2] <= 1.0, \"b-channel must be between 0 and 1\"\n\n self.color = color\n self.kind = self.Kind.DYNAMIC_LIGHT\n self.max_range = max_range\n return self\n\n def static_light(self, color: list, location: list):\n assert len(color) == 3, \"color must have lenght of 3\"\n assert 0.0 <= color[0] <= 1.0, \"r-channel must be between 0 and 1\"\n assert 0.0 <= color[1] <= 1.0, \"g-channel must be between 0 and 1\"\n assert 0.0 <= color[2] <= 1.0, \"b-channel must be between 0 and 1\"\n assert len(location) == 3, \"location must have lenght of 3\"\n\n self.color = color\n self.location = location\n self.kind = self.Kind.DYNAMIC_LIGHT\n return self\n\n def rainbow_static_light(self, location: list, max_energy: int):\n assert len(location) == 3, \"location must have lenght of 3\"\n\n self.kind = self.Kind.RAINBOW_STATIC_LIGHT\n self.location = location\n self.max_energy = max_energy\n return 
self\n\n def rainbow_dynamic_light(self, max_range: float, max_energy: int):\n self.kind = self.Kind.RAINBOW_DYNAMIC_LIGHT\n self.max_range = max_range\n self.max_energy = max_energy\n\n return self\n\n\nclass Viewpoint:\n class Kind:\n STATIC_CAMERA = \"static_camera\"\n DYNAMIC_CAMERA = \"dynamic_camera\"\n OBJECT_PATH = \"object_path\"\n\n def __init__(self, kind: str = \"\",\n location: list = None,\n amount: int = 0,\n size: int = 0,\n horizontal_divisions: int = 0,\n vertical_divisions: int = 0,\n max_range: int = 0\n ):\n self.max_range = max_range\n self.location = location\n self.kind = kind\n self.amount = amount\n self.size = size\n self.vertical_divisions = vertical_divisions\n self.horizontal_divisions = horizontal_divisions\n\n def static_camera_viewpoint(self,\n location: List,\n amount: int):\n self.kind = self.Kind.STATIC_CAMERA\n self.location = location\n self.amount = amount\n\n return self\n\n def dynamic_camera_viewpoint(self, amount: int, max_range: int):\n self.kind = self.Kind.DYNAMIC_CAMERA\n self.amount = amount\n self.max_range = max_range\n\n return self\n\n def espheric_path_viewpoint(self,\n size: int,\n horizontal_divisions: int,\n vertical_divisions: int):\n self.kind = self.Kind.OBJECT_PATH\n self.size = size\n self.horizontal_divisions = horizontal_divisions\n self.vertical_divisions = vertical_divisions\n\n return self\n\n\nclass Render:\n class Style:\n NORMAL = \"normal\"\n SILHOUETTE = \"silhouette\"\n TEXTURE_SEGMENTATION = \"texture-segmentation\"\n RAY_TRACED = \"ray-traced\"\n RASTERED = \"rastered\"\n\n def __init__(self, resolution_x: int, resolution_y: int, output_dir_path: str, styles: List[str]):\n self.resolution_x = resolution_x\n self.resolution_y = resolution_y\n self.output_dir_path = output_dir_path\n self.styles = styles\n","sub_path":"gentool/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"250518725","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom library import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='query'),\n url(r'^$', views.index, name='book'),\n url(r'^confirm/$', views.confirm, name='confirm'),\n # url(r'^(?P(man|woman))/$', views.man, name='room_name'),\n url(r'^man/$', views.ManListView.as_view(), name='man_room'),\n url(r'^woman/$', views.WomanListView.as_view(), name='woman_room'),\n # url(r'^woman_room/$', views.man_room, name='woman_room'),\n)\n","sub_path":"library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537386203","text":"from generator.server_log_class import ServerLog\nimport datetime\nimport random\nimport uuid\nimport pytz\n\n\n# Create a log generator using log class we created\nclass ServerLogGenerator(ServerLog):\n def __init__(self):\n self._random = random.random()\n self._location_country = [\"US\", \"IN\", \"UK\", \"CA\", \"AU\", \"DE\", \"ES\", \"FR\",\n \"NL\", \"SG\", \"RU\", \"JP\", \"BR\", \"CN\", \"OT\"]\n self._event_type = [\"click\", \"purchase\", \"login\", \"log-out\", \"delete-account\",\n \"create-account\", \"update-settings\", \"other\"]\n self._columns = ['event_id', 'account_id', 'event_type', 'device', 'location_country', 'event_timestamp']\n\n # basically we introduce some random anomalies in logs with this method\n def get_server_log(self):\n 
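        # Editor's note: every field below draws from random.choices with
        # heavily skewed weights, so most values are well-formed and a small
        # fraction are deliberate anomalies (None, 'N/A', 0, out-of-range
        # ids) for exercising downstream validation. Standalone illustration:
        #   random.choices(['good', 'bad'], weights=[5, 0.01])  # 'bad' ~0.2%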
event_id = random.choices([uuid.uuid1(),\n 'N/A',\n None,\n 0],\n weights=[5, 0.01, 0.01, 0.01])[0]\n\n account_id = random.choices([random.randint(1, 1000),\n 'N/A',\n None,\n random.randint(10001, 100000)],\n weights=[5, 0.01, 0.01, 0.01])[0]\n\n event_type = random.choices([self._event_type[random.randint(0, len(self._event_type) - 1)],\n 'N/A',\n None,\n '---',\n 0], weights=[5, 0.01, 0.01, 0.01, 0.01])[0]\n\n device = random.choices(['ANDROID',\n 'IOS',\n None,\n 'unknown'],\n weights=[1, 0.08, 0.01, 0.01])[0]\n\n event_timestamp = random.choices([int(datetime.datetime.now(pytz.timezone(\"US/Pacific\")).timestamp()),\n datetime.date.today().isoformat(),\n 'N/A',\n None,\n 0],\n weights=[4, 0.01, 0.01, 0.01, 0.01])[0]\n\n location_country = random.choices([self._location_country[random.randint(0, len(self._location_country) - 1)],\n 'N/A',\n None,\n 'unknown',\n 0],\n weights=[5, 0.01, 0.01, 0.01, 0.01])[0]\n\n super().__init__(event_id, account_id, event_type, device, location_country, event_timestamp)\n\n # return instance attributes as dict\n return self.to_dict()\n\n\n","sub_path":"streaming-project-aws/generator/server_log_generator.py","file_name":"server_log_generator.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221870727","text":"from matplotlib import pyplot as plt\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, LSTM\nfrom sklearn.model_selection import train_test_split\n\nimport json\nimport numpy as np\nimport random\nimport sys\nimport time\n\nfrom utils import fmeasure, precision, recall, get_activations\n\n\n# Example usages:\n# python classifier.py visualize tr ../trained_models/model-20171209-193942.h5 20 'ayakkabi'\n# python classifier.py train tr\n# python classifier.py test tr ../trained_models/model-20171209-193942.h5 10\n\ndatasets = {\n 'tr': (\n '../datasets/akp-chp/akp.txt',\n '../datasets/akp-chp/chp.txt'\n ),\n 'en': (\n '../datasets/trump-clinton/trump.txt',\n '../datasets/trump-clinton/clinton.txt'\n )\n}\n\nif len(sys.argv) < 2:\n print('Enter arguments')\n sys.exit(0)\n\ndataset = datasets[sys.argv[2]]\nbatch_size = 1024\nsequence_length = 40\nsequence_count = int(1e+05)\nepochs = 70\nstep = None\n\n\ndef preprocess(text):\n if step:\n # sliding window on comments in order to utilize sentences more\n windowed_text = []\n for txt in text:\n for i in range(0, len(txt) - sequence_length, step):\n windowed_text.append(txt[i: i + sequence_length])\n\n print('Windowed sequences 0: {}'.format(len(windowed_text)))\n text = windowed_text\n\n random.shuffle(text)\n\n if len(text) < sequence_count:\n print('Not enough sequences')\n sys.exit(0)\n\n # we can't train all of them if data is too big\n return text[:sequence_count]\n\n\ntext0 = open(dataset[0]).read().lower().split('\\n')\ntext1 = open(dataset[1]).read().lower().split('\\n')\nprint('Text0 sequences: {} Text1 sequences: {}'.format(len(text0), len(text1)))\n\ntext0 = preprocess(text0)\ntext1 = preprocess(text1)\n\n# first set is labeled with 0, second with 1\ntext0_labels = np.zeros(len(text0), dtype=np.bool)\ntext1_labels = np.ones(len(text1), dtype=np.bool)\n\nsentences = text0 + text1\nprint('total sentence count {}'.format(len(sentences)))\n\n# trim the text if it's longer\nsentences = list(map(lambda s: s[:sequence_length], sentences))\nY = np.concatenate((text0_labels, text1_labels)).reshape((-1, 1))\n\nchars = ['\\n', ' ', '.', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 
'i', 'j', 'k', 'l', 'm', 'n', 'o',\n 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\nX = np.zeros((len(sentences), sequence_length, len(chars)), dtype=np.bool)\nfor i, s in enumerate(sentences):\n for t, char in enumerate(s):\n X[i, t, char_indices[char]] = 1\n\nif sys.argv[1] == 'train':\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, shuffle=True)\n\n print(len(x_train), 'train sequences')\n print(len(x_test), 'test sequences')\n\n # build the model: a single LSTM\n print('Build model...')\n model = Sequential()\n model.add(LSTM(128, input_shape=(sequence_length, len(chars)), return_sequences=True))\n model.add(LSTM(128, return_sequences=True))\n model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))\n model.add(Dense(1, activation='sigmoid'))\n\n model.compile(loss='binary_crossentropy', optimizer='adam',\n metrics=['accuracy', fmeasure, precision, recall])\n\n print('Train...')\n history = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test))\n model_identifier = '{}'.format(time.strftime(\"%Y%m%d-%H%M%S\"))\n model.save('../trained_models/model-{}.h5'.format(model_identifier))\n score, acc, _, _, _ = model.evaluate(x_test, y_test, batch_size=batch_size)\n print('Test score:', score)\n print('Test accuracy:', acc)\n train_accs = history.history['acc']\n test_accs = history.history['val_acc']\n train_losses = history.history['loss']\n test_losses = history.history['val_loss']\n plt.subplot(2, 1, 1)\n plt.plot(range(epochs), train_accs, label=\"Train Accuracy\")\n plt.plot(range(epochs), test_accs, label=\"Test Accuracy\")\n plt.legend()\n plt.subplot(2, 1, 2)\n plt.plot(range(epochs), train_losses, label=\"Train Loss\")\n plt.plot(range(epochs), test_losses, label=\"Test Loss\")\n plt.legend()\n plt.savefig('../figures/figure-{}.png'.format(model_identifier))\n\n\nelif sys.argv[1] == 'test':\n iters = int(sys.argv[4])\n model_path = sys.argv[3]\n model = load_model(model_path)\n mask = np.random.randint(0, 2 * sequence_count - 1, size=(iters,))\n chosen_x = X[mask]\n chosen_y = Y[mask]\n chosen_s = [sentences[i] for i in mask]\n\n preds = model.predict_classes(chosen_x, batch_size=batch_size, verbose=0)\n human_preds = np.zeros(preds.shape)\n for i in range(iters):\n cur_s = chosen_s[i]\n cur_x = chosen_x[i]\n cur_y = chosen_y[i]\n cur_pred = preds[i]\n human_pred = int(input('AKP->0 CHP->1\\n{}\\n'.format(cur_s)))\n human_preds[i, 0] = human_pred\n print('Ground truth: {}\\nYour prediction: {}\\nModel prediction: {}\\n'\n .format(int(cur_y), human_pred, cur_pred[0]))\n print('Your accuracy: {}'.format(np.sum(human_preds == chosen_y) / preds.shape[0]))\n print('Model accuracy: {}'.format(np.sum(preds == chosen_y) / preds.shape[0]))\n\n\nelif sys.argv[1] == 'visualize':\n iters = int(sys.argv[4])\n model_path = sys.argv[3]\n model = load_model(model_path)\n\n try:\n substr = sys.argv[5]\n except IndexError:\n substr = None\n\n if substr:\n sentence_idxs = [i for i in range(len(sentences)) if substr in sentences[i]]\n if len(sentence_idxs) < iters:\n print('Not enough sentences with word {}'.format(substr))\n sys.exit(0)\n\n _mask = np.random.randint(0, len(sentence_idxs) - 1, size=(iters,))\n mask = np.array([sentence_idxs[idx] for idx in _mask])\n else:\n mask = np.random.randint(0, 2 * sequence_length - 1, size=(iters,))\n\n chosen_x = X[mask]\n 
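# [editor's sketch] The classifier above encodes each sentence character by
# character into a (sequence_length, n_chars) boolean matrix. The same
# encoding step as a standalone numpy function; names are illustrative, and
# unlike lstm.py this version skips characters outside the alphabet instead
# of raising KeyError.
import numpy as np

def one_hot_chars(sentence, chars, sequence_length):
    char_indices = {c: i for i, c in enumerate(chars)}
    x = np.zeros((sequence_length, len(chars)), dtype=bool)
    for t, ch in enumerate(sentence[:sequence_length]):
        if ch in char_indices:
            x[t, char_indices[ch]] = True
    return x

print(one_hot_chars('abc', ['a', 'b', 'c'], 5).astype(int))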
chosen_y = Y[mask]\n chosen_s = [sentences[i] for i in mask]\n\n activations = get_activations(model, 0, chosen_x)\n\n real_data = []\n for i in range(iters):\n s = chosen_s[i]\n datum = {'pca': [], 'seq': s}\n for j in range(len(s)):\n datum['pca'].append(list(map(lambda x: float(x), activations[i, j, :])))\n real_data.append(datum)\n\n datasets = {'data': real_data}\n with open('cell.json', 'w') as outfile:\n json.dump(datasets, outfile)\n\n print('Wrote to json')\n","sub_path":"scripts/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":6593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"203284159","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 OpenSynergy Indonesia\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import api, models, fields\n\n\nclass KBLapPosisiWipWizard(models.TransientModel):\n _name = \"l10n_id.kb_lap_posisi_wip_wizard\"\n _inherit = [\"l10n_id.date_range_selector\"]\n\n warehouse_ids = fields.Many2many(\n string=\"Warehouse\",\n comodel_name=\"stock.warehouse\",\n relation=\"rel_djbc_posisi_wip_2_warehouse\",\n column1=\"wizard_id\",\n column2=\"warehouse_id\"\n )\n\n @api.multi\n def action_print_sreen(self):\n waction = self.env.ref(\n \"laporan_posisi_wip.djbc_kb_lap_posisi_wip_action\")\n criteria = [\n (\"tgl_penerimaan\", \">=\", self.date_start),\n (\"tgl_penerimaan\", \"<=\", self.date_end),\n (\"warehouse_id\", \"in\", self.warehouse_ids.ids)\n ]\n waction.domain = criteria\n return waction.read()[0]","sub_path":"wizards/date_range_selector.py","file_name":"date_range_selector.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"293921164","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport utils\n\n\ndef dense(input_tensor, output_units, scope, activation=None):\n with tf.name_scope(scope):\n #shape of the weights matrix\n shape = (input_tensor.shape.as_list()[1], output_units)\n # weights matrix\n weights = tf.Variable(tf.truncated_normal(shape=shape, dtype=tf.float32), name='W')\n # bias vector (**brodcast**)\n bias = tf.Variable(tf.zeros(shape=[output_units], dtype=tf.float32), name='b')\n # define the layers as W * x + b\n layer = tf.add(tf.matmul(input_tensor, weights), bias)\n # add the squashing function (non linearity)\n if activation is not None:\n return activation(layer)\n else:\n return layer\n\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()\nprint('shape training images ', train_images.shape)\nprint('shape test images ', test_images.shape)\n\nnum_classes = 10\n\ni = np.random.randint(0, len(train_images))\nprint('displaying image {} with class {}'.format(i, train_labels[i]))\nplt.imshow(train_images[i], cmap='gray');\nplt.show()\n\ng = tf.Graph()\n\nplt.show()\n\nflatten_shape = np.prod(train_images.shape[1:])\n\nwith g.as_default():\n X = tf.placeholder(tf.float32, [None, flatten_shape], name='X')\n y = tf.placeholder(tf.float32, [None, 10], name='y')\n\nutils.show_graph(g)\n\nwith g.as_default():\n # define the model\n l1 = dense(X, 32, 'h1', activation=tf.nn.sigmoid)\n l2 = dense(l1, 64, 'h2', activation=tf.nn.relu)\n logits = dense(l1, 10, 'out', activation=None)\n\n # define the loss function\n loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n logits=logits, labels=y))\n\n # define the 
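# [editor's sketch] The dense() helper above computes activation(x @ W + b)
# with W drawn from a truncated normal. The same layer in plain numpy for
# reference (init simplified to an ordinary normal draw; names illustrative).
import numpy as np

def dense_np(x, output_units, activation=None, seed=0):
    rng = np.random.default_rng(seed)
    w = rng.normal(size=(x.shape[1], output_units)).astype(np.float32)
    b = np.zeros(output_units, dtype=np.float32)
    y = x @ w + b
    return activation(y) if activation is not None else y

x = np.ones((2, 4), dtype=np.float32)
print(dense_np(x, 3, activation=lambda z: np.maximum(z, 0.0)).shape)  # (2, 3)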
optimizer\n optmizer = tf.train.RMSPropOptimizer(learning_rate=0.01)\n\n # train operation\n train_op = optmizer.minimize(loss_op)\n\n # metrics\n correct_pred = tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\nutils.show_graph(g)\n\nepochs = 10\nbatch_size = 128\nval_step = int(len(train_images) * 0.9)\n\ntrain_images = train_images.astype('float32') / 255\ntest_images = test_images.astype('float32') / 255\n\n#reshape the images\ntrain_images_reshaped = np.reshape(train_images, (len(train_images), -1))\ntest_images_reshaped = np.reshape(test_images, [len(test_images), -1])\n\nprint('shape train images {}'.format(train_images_reshaped.shape))\n\n\ndef one_hot(labels, num_classes):\n results = np.zeros(shape=(len(labels), num_classes), dtype=np.float32)\n for i, values in enumerate(labels):\n results[i,values] = 1.\n return results\n\n\ntrain_labels_one_hot = one_hot(train_labels, 10)\ntest_labels_one_hot = one_hot(test_labels, 10)\nprint('shape train labels with one hot econding {}'.format(train_labels_one_hot.shape))\n\ntrain_x, val_x = train_images_reshaped[:val_step], train_images_reshaped[val_step:]\ntrain_y, val_y = train_labels_one_hot[:val_step], train_labels_one_hot[val_step:]\n\nnum_batches = len(train_x) // batch_size\n\n\ndef to_batch(x,y, batch_size, shuffle=True):\n idxs = np.arange(0, len(x))\n np.random.shuffle(idxs)\n x = x[idxs]\n y = y[idxs]\n num_batches = len(x) // batch_size\n for i in range(num_batches):\n start = i * batch_size\n end = (i + 1) * batch_size\n if end < len(x):\n yield x[start:end], y[start:end]\n else:\n yield x[start:], y[start:]\n\n\nwith tf.Session(graph=g) as sess:\n # initialize variables (i.e. assign to their default value)\n sess.run(tf.global_variables_initializer())\n\n for e in range(1, epochs + 1):\n gen = to_batch(train_x, train_y, batch_size)\n for _ in range(num_batches):\n x_batch, y_batch = next(gen)\n sess.run(train_op, feed_dict={X: x_batch, y: y_batch})\n loss, acc = sess.run([loss_op, accuracy], feed_dict={X: val_x, y: val_y})\n print(\"Epochs {}, val_loss={:.3f}, val_acc={:.3f}\".format(e, loss, acc))\n\n print('training finished')\n\n test_acc = sess.run(accuracy, feed_dict={X: test_images_reshaped, y: test_labels_one_hot})\n print('Testing accuracy {}'.format(test_acc))\n\n saver = tf.train.Saver()\n save_path = saver.save(sess, '/tmp/model.ckpt')\n print('model saved in path {}'.format(save_path))\n\nwith tf.Session(graph=g) as sess:\n saver = tf.train.Saver()\n saver.restore(sess, '/tmp/model.ckpt')\n\n i = np.random.randint(0, len(test_images))\n res = np.argmax(sess.run(logits, feed_dict={X: [test_images_reshaped[i]]}))\n print('class predicted {}, expected {}'.format(res, test_labels[i]))\n plt.imshow(test_images[i], cmap='gray')","sub_path":"FF_NeuralNetwork.py","file_name":"FF_NeuralNetwork.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"127819594","text":"# coding: utf8\n\nimport fire\nimport runpy\nimport webbrowser\n\nfrom auditorium import Show\n\n\nclass Auditorium:\n @staticmethod\n def run(\n path,\n host=\"127.0.0.1\",\n port=6789,\n debug=False,\n instance_name=\"show\",\n launch=True,\n ):\n \"Runs a custom Python script as a slideshow.\"\n\n show = Show.load(path, instance_name)\n show.run(host=host, port=port, debug=debug, launch=launch)\n\n @staticmethod\n def demo(host=\"127.0.0.1\", port=6789, debug=False, launch=True):\n \"Starts the demo 
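# [editor's sketch] to_batch() above shuffles index order and then yields
# fixed-size slices. A compact equivalent of that batching generator
# (illustrative, not from the script):
import numpy as np

def batches(x, y, batch_size, seed=0):
    idx = np.random.default_rng(seed).permutation(len(x))
    for start in range(0, len(x), batch_size):
        sel = idx[start:start + batch_size]
        yield x[sel], y[sel]

xs = np.arange(10).reshape(10, 1)
ys = np.arange(10)
for xb, yb in batches(xs, ys, 4):
    print(xb.ravel(), yb)  # two batches of 4, then one of 2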
slideshow.\"\n\n from auditorium.demo import show\n\n show.run(host, port, debug=debug, launch=launch)\n\n @staticmethod\n def render(path, theme=\"white\", instance_name=\"show\"):\n \"Renders a slideshow into a single HTML with all resources embedded.\"\n\n show = Show.load(path, instance_name)\n print(show.render(theme))\n\n @staticmethod\n def test():\n return \"It's OK!\"\n\n\ndef main():\n fire.Fire(Auditorium, name=\"auditorium\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"auditorium/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"453917501","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\n# General day 2 stuff\n\ndef exec(ctx, idx):\n # pad out ctx if we're at the end.\n if len(ctx) < idx+4:\n ctx = ctx + [0,0,0,0]\n instr, arg1, arg2, out = ctx[idx:idx+4]\n if instr == 1:\n ctx[out] = ctx[arg1] + ctx[arg2]\n return True\n elif instr == 2:\n ctx[out] = ctx[arg1] * ctx[arg2]\n return True\n elif instr == 99:\n return False\n else:\n raise Exception(\"bad instruction: %s at index %d\" % (instr, idx))\n\n\ndef load(f):\n contents = []\n for line in f:\n contents.extend(line.split(','))\n return [ int(i) for i in contents ]\n\n\ndef run(ctx, noun, verb):\n ctx[1] = noun\n ctx[2] = verb\n idx = 0\n while exec(ctx, idx * 4):\n idx += 1\n return ctx[0]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('input', type=argparse.FileType('r'), nargs='?', default=sys.stdin)\n\n args = parser.parse_args(sys.argv[1:])\n ctx = load(args.input)\n for x in range(100):\n for y in range(100):\n out = run(list(ctx), x, y)\n if out == 19690720:\n print(x, y)\n\n","sub_path":"2019/day-02/fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"499916705","text":"import random\n\n\ndef getJoinMessage():\n joinArray = [\"**Result:** 76. Ragnarok's here!\",\n \"Ragnarok has been summoned by the great god Bahamut.\",\n \"All hail Tiamat!\",\n \"I just woke up, whaddaya want?\",\n \"Is this thing on?\",\n \"Beep boop, I'm a bot.\",\n \"Roll a DEX save, I sneezed.\",\n \"Deal with it.\",\n \"*yawn*\"]\n return joinArray[random.randint(0, len(joinArray) - 1)]\n\ndef getCritMessage():\n critArray = [\"\",\n \"Maybe you'll be able to hit, for once.\",\n \"**fistbump**\",\n \"Maybe they were just distracted?\",\n \"Enemy off balance! If in combat, DC 15 DEX save or be knocked prone.\",\n \"Do a barrel roll!\",\n \"For science!\",\n \"Do an aileron roll! Even though dragons don't have ailerons!\",\n \"Ryuu ga waga teki wo kurau!\"]\n return random.choice(critArray) #critArray[random.randint(0, len(critArray) - 1)]\n\ndef getFailMessage():\n failArray = [\"\",\n \"(╯°□°)╯︵ ┻━┻\",\n \"You failed!\",\n \"playerpls\",\n \"Your attack swings straight and true, missing the enemy with calculated precision.\",\n \"You're off balance! If applicable, DC 15 DEX save or fall prone.\",\n \"You're overextended! -2 to your AC next turn!\",\n \"http://vignette3.wikia.nocookie.net/sims/images/b/bb/There_was_an_attempt.png/revision/latest?cb=20140923153317\",\n \"I can't watch.\"]\n return random.choice(failArray) #failArray[random.randint(0, len(failArray) - 1)]\n\ndef getAllSpellMessage():\n allSpellArray = [\"...No.\",\n \"Really?\",\n \"A-L-L. 
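# [editor's sketch] A worked example of the intcode interpreter in fast.py
# above: opcodes 1/2 read two *positions*, combine them, and store at a
# third position; 99 halts. Re-implemented minimally here so the example is
# self-contained.
def step(ctx, idx):
    chunk = ctx[idx:idx + 4] + [0] * max(0, idx + 4 - len(ctx))
    op, a, b, out = chunk
    if op == 1:
        ctx[out] = ctx[a] + ctx[b]
    elif op == 2:
        ctx[out] = ctx[a] * ctx[b]
    return op != 99

prog = [1, 0, 0, 0, 99]  # add ctx[0] + ctx[0] and store at position 0
step(prog, 0)
print(prog)  # [2, 0, 0, 0, 99]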
All.\",\n \"All, 10th-level Magic.\\n**Casting Time:** Instantaneous\\n**Range:** Infinity\\n**Components:** V, S, M\\n**Material Requirement:** Your character sheet.\\n**Duration:** Until dispelled\\n**Concentration:** no\\n\\nRagnarok gains control of your character for the duration of this spell.\"]\n return random.choice(allSpellArray) #allSpellArray[random.randint(0, len(allSpellArray) - 1)]\n\ndef getGameName():\n gameNameArray = [\"D&D 5e\",\n \"Barrel Roll Simulator 2016\",\n \"Dungeons and Dragons\",\n \"The Game\"]\n return random.choice(gameNameArray) #gameNameArray[random.randint(0, len(gameNameArray) - 1)]\n\ndef getBanMessage(target):\n banArray = [target + \" is banned forever! They are never coming back! This command doesn't actually do anything!\",\n \"I curse \" + target + \" with the wrath of a hundred mimics! May they never return from the Abyssal Planes! This command doesn't actually do anything!\",\n target + \" is banned! They are banished to the Infernal Planes! May they burn with the wrath of a hundred fire-resistant imps! This command doesn't actually do anything!\",\n target + \" is banished to the native plane of flumphs! FLUUUUMPHS!\",\n \"http://i.imgur.com/O3DHIA5.gif\"]\n return random.choice(banArray) #banArray[random.randint(0, len(banArray) - 1)]\n","sub_path":"tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"488321128","text":"# -*- coding: utf-8 -*-\r\nimport os \r\n\r\nimport urllib\r\n\r\nimport urllib2\r\n\r\nimport re\r\n\r\ninfo = u'''\r\n-------------------------\r\n1.所有\r\n2.大胸妹\r\n3.小翘臀\r\n4.美腿控\r\n5.有颜值\r\n6.大杂烩\r\n请输入数字选择要下载的类别:\r\n--------------------------\r\n'''\r\nprint(info)\r\nnum = raw_input() #获取下载选项\r\nchoices = {\r\n '1':r'',\r\n '2':r'cid=2&',\r\n '3':r'cid=6&',\r\n '4':r'cid=7&',\r\n '5':r'cid=3&',\r\n '6':r'cid=4&',\r\n '7':r'cid=5&'\r\n }\r\n\r\n\r\nchoice = choices[num]\r\nbase_url = r'http://www.dbmeinv.com/dbgroup/show.htm?'\r\nchoice_url = base_url + choice #要下载的页面的首页url\r\nprint(u'start page: ')\r\nstartpage = input()\r\nprint(u'end page: ')\r\nendpage = input()\r\npage_index = r'pager_offse='\r\nurl_pages = [] #储存所有要下载的url\r\n\r\n\r\n\r\n\r\n#拼接url的各个部分\r\nfor index in range(int(startpage),int(endpage)):\r\n url_pages.append(choice_url + page_index + str(index))\r\n\r\n#print(u'path: ') \r\n#dir_path = raw_input() #保存路径名\r\n#print(u'folder name: ') \r\n#dir_title = raw_input() #保存文件夹名\r\n#new_path = os.path.join(dir_path, dir_title) #拼接出最终的路径\r\n\r\nnew_path = \"/opt/meinv/\"\r\n\r\nif not os.path.isdir(new_path): #如果不存在就创建文件夹\r\n os.makedirs(new_path)\r\n print(u'save img path:%s' % new_path)\r\n\r\n\r\nj = startpage #用来给图片编号\r\n\r\nfor page in url_pages: #循环每个要下载的页面\r\n myUrl = page #取出一个url\r\n print(myUrl)\r\n content = urllib.urlopen(myUrl).read().decode('utf-8') #获取url的html代码\r\n pattern = re.compile(r'' ,re.S) #正则表达式对象\r\n allurl = re.findall(pattern, content) #找到并返回所有符合对象的值,即图片链接\r\n i = 0 #编号用\r\n \r\n for item in allurl: \r\n location = r'%s%s_%s.jpg' % (new_path, j, i) #图片存储路径\r\n print(item)\r\n urllib.urlretrieve(item, location) #下载图片到指定位置\r\n i += 1\r\n \r\n j += 1\r\n","sub_path":"bieren.py","file_name":"bieren.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111041815","text":"import unittest\n\nimport mock\n\nfrom testing import RedconTestBase\nfrom redcon import types\n\n\nclass 
NotUuid4(object):\n def __init__(self):\n self.counter = 0\n\n def __call__(self, *args, **kwargs):\n self.counter += 1\n return 'not_uuid_{}'.format(self.counter)\n\n\nclass TestCounter(RedconTestBase):\n\n def setUp(self):\n super(TestCounter, self).setUp()\n\n self.key = 'testcounter'\n self.counter = self.redcon.counter(self.key)\n\n def test_add(self):\n val = self.counter + 2\n self.assertEqual(2, val)\n res = self.redis.get(self.key)\n self.assertEqual('2', res)\n\n def test_radd(self):\n self.redis.incr(self.key)\n val = 3 + self.counter\n self.assertEqual(4, val)\n res = self.redis.get(self.key)\n self.assertEqual('4', res)\n\n def test_sub(self):\n self.redis.incr(self.key, 3)\n val = self.counter - 2\n self.assertEqual(1, val)\n res = self.redis.get(self.key)\n self.assertEqual('1', res)\n\n def test_rsub(self):\n self.redis.incr(self.key, 3)\n val = 2 - self.counter\n self.assertEqual(1, val)\n res = self.redis.get(self.key)\n self.assertEqual('1', res)\n\n\nclass TestSetBase(RedconTestBase):\n\n @property\n def all1(self):\n return self.redis.smembers(self.key1)\n\n def setUp(self):\n super(TestSetBase, self).setUp()\n\n self.key1 = 'testset1'\n self.set1 = self.redcon.set(self.key1)\n self.redis.sadd(self.key1, 1, 2, 3)\n\n self.key2 = 'testset2'\n self.set2 = self.redcon.set(self.key2, ['a', 'b', 'c'])\n\n self.key3 = 'testset3'\n self.set3 = self.redcon.set(self.key3, [':('])\n\n self.key4 = 'testset4'\n self.set4 = self.redcon.set(self.key4, [2, 3])\n\n self.key5 = 'testset5'\n self.set5 = self.redcon.set(self.key5, [2, 3])\n\n\nclass TestSet(TestSetBase):\n\n def test_created_with_iter(self):\n self.redcon.set('itertest', [1, 2, 3])\n expected = {'1', '2', '3'}\n actual = self.redis.smembers('itertest')\n self.assertEqual(expected, actual)\n\n\nclass TestSetContainerMethods(TestSetBase):\n\n def test_iter(self):\n actual = list(self.set1)\n actual.sort()\n expected = ['1', '2', '3']\n self.assertEqual(expected, actual)\n\n def test_in(self):\n actual = (3 in self.set1)\n self.assertTrue(actual)\n actual = ('no' in self.set1)\n self.assertFalse(actual)\n\n def test_not_in(self):\n actual = ('no' not in self.set1)\n self.assertTrue(actual)\n actual = (3 not in self.set1)\n self.assertFalse(actual)\n\n def test_len(self):\n actual = len(self.set1)\n self.assertEqual(3, actual)\n\n\nclass TestSetComparisonMethods(TestSetBase):\n\n def test_eq_builtin_set(self):\n self.assertEqual(self.set2, set(['a', 'b', 'c']))\n\n def test_eq_redcon_set(self):\n self.assertEqual(self.set4, self.set5)\n\n def test_ge_builtin_set(self):\n self.assertGreaterEqual(self.set2, set(['a', 'b']))\n\n def test_ge_redcon_set(self):\n self.assertGreaterEqual(self.set1, self.set4)\n\n def test_gt_builtin_set(self):\n self.assertGreater(self.set2, set(['a', 'b']))\n\n def test_gt_redcon_set(self):\n self.assertGreater(self.set1, self.set4)\n\n def test_le_builtin_set(self):\n self.assertLessEqual(self.set3, set([':(']))\n\n def test_le_redcon_set(self):\n self.assertLessEqual(self.set4, self.set1)\n\n def test_lt_builtin_set(self):\n self.assertLess(self.set3, set([':(', ':)']))\n\n def test_lt_redcon_set(self):\n self.assertLess(self.set4, self.set1)\n\n def test_ne_builtin_set(self):\n self.assertNotEqual(self.set1, set(['a']))\n\n def test_ne_redcon_set(self):\n self.assertNotEqual(self.set1, self.set2)\n\n\nclass TestSetPublicMethods(TestSetBase):\n\n def test_add(self):\n self.set1.add('abc')\n expected = set([x for x in '123'] + ['abc'])\n self.assertEqual(expected, self.all1)\n\n def 
test_difference_redcon_sets(self):\n actual = self.set1.difference(self.set4, self.set2)\n expected = set('1')\n self.assertEqual(expected, actual)\n\n def test_difference_builtins(self):\n expected = set(['2'])\n actual = self.set1.difference(['1', '3', '5'], set(['1', 'a']))\n self.assertEqual(expected, actual)\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_difference_builtins_temp_set_created(self, _uuid4):\n _uuid4.side_effect = NotUuid4()\n _pipe = mock.MagicMock()\n\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n self.set1.difference(['1', '3', '5'], set(['1', 'a']))\n\n _pipe.assert_has_calls([\n mock.call.sadd('not_uuid_1', '1', '3', '5'),\n mock.call.expire('not_uuid_1', 5),\n mock.call.sadd('not_uuid_2', '1', 'a'),\n mock.call.expire('not_uuid_2', 5),\n mock.call.sdiff(self.key1, 'not_uuid_1', 'not_uuid_2'),\n mock.call.execute()\n ])\n\n def test_difference_mix(self):\n expected = set('a')\n actual = self.set2.difference(self.set3, set([':(', 'b', 'c']))\n self.assertEqual(expected, actual)\n\n def test_difference_non_iter(self):\n with self.assertRaises(TypeError) as cm:\n self.set1.difference(7)\n self.assertEqual(\n \"'int' object is not iterable\",\n cm.exception.message\n )\n\n def test_intersection_redcon_sets(self):\n expected = set(['2', '3'])\n actual = self.set1.intersection(self.set4, self.set5)\n self.assertEqual(expected, actual)\n\n def test_intersection_builtins(self):\n expected = set(['c'])\n actual = self.set2.intersection(['b', 'c', 'd'], ['c', 'd'])\n self.assertEqual(expected, actual)\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_intersection_builtins_temp_set_created(self, _uuid4):\n _uuid4.side_effect = NotUuid4()\n _pipe = mock.MagicMock()\n\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n self.set1.intersection(['1', '2'], ['2', '3'])\n\n _pipe.assert_has_calls([\n mock.call.sadd('not_uuid_1', '1', '2'),\n mock.call.expire('not_uuid_1', 5),\n mock.call.sadd('not_uuid_2', '2', '3'),\n mock.call.expire('not_uuid_2', 5),\n mock.call.sinter(self.key1, 'not_uuid_1', 'not_uuid_2'),\n mock.call.execute()\n ])\n\n def test_isdisjoint_redcon_set(self):\n self.assertTrue(\n self.set1.isdisjoint(self.set2)\n )\n self.assertFalse(\n self.set1.isdisjoint(self.set4)\n )\n\n def test_isdisjoint_builtin_set(self):\n self.assertTrue(\n self.set1.isdisjoint(['a'])\n )\n self.assertFalse(\n self.set1.isdisjoint(['1', '2'])\n )\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_isdisjoint_builtin_set_temp_created(self, _uuid4):\n random_key = 'random_key'\n _uuid4.return_value = random_key\n _pipe = mock.MagicMock()\n\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n self.set1.isdisjoint(['1', '2'])\n\n _pipe.assert_has_calls([\n mock.call.sadd(random_key, '1', '2'),\n mock.call.expire(random_key, 5),\n mock.call.sinter(self.key1, random_key),\n mock.call.execute()\n ])\n\n def test_issubset_builtin_set(self):\n self.assertTrue(\n self.set1.issubset(set(['1', '2', '3', '4']))\n )\n self.assertFalse(\n self.set1.issubset(set(['1', '2']))\n )\n\n def test_issubset_builtin_iter(self):\n self.assertTrue(\n self.set1.issubset('123')\n )\n self.assertFalse(\n self.set1.issubset('12')\n )\n\n def test_issubset_redcon_set(self):\n self.assertTrue(\n self.set4.issubset(self.set1)\n )\n self.assertFalse(\n self.set1.issubset(self.set4)\n )\n\n def test_issubset_chain(self):\n chain = 
self.set1 | self.set2\n self.assertTrue(\n self.set2.issubset(chain)\n )\n self.assertFalse(\n self.set3.issubset(chain)\n )\n\n def test_issuperset_builtin_set(self):\n self.assertTrue(\n self.set1.issuperset(set('12'))\n )\n self.assertFalse(\n self.set1.issuperset(set('1234'))\n )\n\n def test_issuperset_builtin_iter(self):\n self.assertTrue(\n self.set1.issuperset('12')\n )\n self.assertFalse(\n self.set1.issuperset('1234')\n )\n\n def test_issuperset_redcon_set(self):\n self.assertTrue(\n self.set1.issuperset(self.set4)\n )\n self.assertFalse(\n self.set4.issuperset(self.set1)\n )\n\n def test_issuperset_chain(self):\n chain = self.set4 | self.set5\n self.assertTrue(\n self.set1.issuperset(chain)\n )\n self.assertFalse(\n self.set2.issuperset(chain)\n )\n\n def test_union_meth_generic_iter(self):\n actual = self.set1.union(['a', 'b'])\n expected = {'1', '2', '3', 'a', 'b'}\n self.assertEqual(expected, actual)\n\n def test_union_meth_redcon_set(self):\n actual = self.set1.union(self.set2)\n expected = {'1', '2', '3', 'a', 'b', 'c'}\n self.assertEqual(expected, actual)\n\n def test_union_meth_redcon_set_and_generic_iter(self):\n actual = self.set1.union(['p', 'q'], self.set2)\n expected = {'1', '2', '3', 'a', 'b', 'c', 'p', 'q'}\n self.assertEqual(expected, actual)\n\n def test_union_meth_non_iter(self):\n with self.assertRaises(TypeError) as cm:\n self.set1.union(3)\n self.assertEqual(\"'int' object is not iterable\", cm.exception.message)\n\n def test_update_meth_non_iter(self):\n with self.assertRaises(TypeError) as cm:\n self.set1.update(3)\n self.assertEqual(\n \"'int' object is not iterable\",\n cm.exception.message\n )\n\n def test_update_meth_one(self):\n self.set1.update('b')\n expected = set('123b')\n self.assertEqual(expected, self.all1)\n\n def test_update_meth_many(self):\n self.set1.update('c', 'd', 'e')\n expected = set('123cde')\n self.assertEqual(expected, self.all1)\n\n def test_update_oper_one(self):\n s = {7, 8}\n self.set1 |= s\n expected = set('12378')\n self.assertEqual(expected, self.all1)\n\n def test_update_oper_many(self):\n s1 = {'a', 'b'}\n s2 = {'c', 'd'}\n self.set1 |= s1 | s2\n expected = set('123abcd')\n self.assertEqual(expected, self.all1)\n\n def test_update_oper_non_set(self):\n with self.assertRaises(TypeError) as cm:\n self.set1 |= 'a'\n self.assertEqual(\n \"unsupported operand type(s) for |=: 'Set' and 'str'\",\n cm.exception.message\n )\n\n @unittest.skip('not implemented')\n def test_update_builtin_set(self):\n py_set = {1, 2, 3}\n redcon_set = self.redcon.set(self.key1)\n redcon_set.update((4, 5, 6))\n py_set |= redcon_set\n expected = {1, 2, 3, '4', '5', '6'}\n self.assertEqual(expected, py_set)\n\n\nclass TestSetUnionChainOper(RedconTestBase):\n\n def setUp(self):\n super(TestSetUnionChainOper, self).setUp()\n\n self.key1 = 'set1'\n self.key2 = 'set2'\n self.key3 = 'set3'\n self.key4 = 'set4'\n\n self.redis.sadd(self.key1, 1, 2, 3)\n self.redis.sadd(self.key2, 2, 3, 4)\n self.redis.sadd(self.key3, 3, 4, 5)\n self.redis.sadd(self.key4, 4, 5, 6)\n\n self.set1 = self.redcon.set(self.key1)\n self.set2 = self.redcon.set(self.key2)\n self.set3 = self.redcon.set(self.key3)\n self.set4 = self.redcon.set(self.key4)\n\n def test_returns_self(self):\n collector = types.SetUnionChain(self.redcon, self.set1)\n res = collector | self.set2\n assert res is collector\n\n def test_repr(self):\n actual = repr(self.set1 | self.set2 | set(['a']))\n expected = \"SetUnionChain(keys=['set1', 'set2'], others=[set(['a'])])\"\n self.assertEqual(expected, actual)\n\n 
def test_non_set_oper(self):\n collector = types.SetUnionChain(self.redcon, self.set1)\n with self.assertRaises(TypeError) as cm:\n collector | 3\n self.assertEqual(\n \"unsupported operand type(s) for |: 'Set' and 'int'\",\n cm.exception.message\n )\n\n def test_iter_redcon_sets(self):\n res = self.set1 | self.set2 | self.set3 | self.set4\n actual = set(res)\n expected = set(['1', '2', '3', '4', '5', '6'])\n self.assertEqual(expected, actual)\n\n def test_iter_combines_keys_to_sunion(self):\n with mock.patch.object(self.set1._redcon, '_conn'):\n set(self.set1 | self.set2 | self.set3 | self.set4)\n self.set1._redcon._conn.sunion.assert_called_once_with(\n self.key1, self.key2, self.key3, self.key4\n )\n\n def test_iter_mixed(self):\n non_redcon = set(['a', 'b', 'c'])\n res = self.set1 | self.set2 | non_redcon\n actual = set(res)\n expected = set(['1', '2', '3', '4', 'a', 'b', 'c'])\n self.assertEqual(expected, actual)\n\n def test_iter_mixed_one_each(self):\n non_redcon = set(['a', 'b'])\n res = self.set1 | non_redcon\n actual = set(res)\n expected = set(['1', '2', '3', 'a', 'b'])\n self.assertEqual(expected, actual)\n\n def test_setunion_oper_setunion(self):\n res1 = self.set1 | self.set2\n res2 = self.set3 | self.set4 | set(['a'])\n actual = res1 | res2\n expected = set(['1', '2', '3', '4', '5', '6', 'a'])\n self.assertEqual(expected, actual)\n\n def test_setunion_oper_setunion_combines_keys_to_sunion(self):\n setunion1 = self.set1 | self.set2\n setunion2 = self.set3 | self.set4\n with mock.patch.object(setunion1._redcon, '_conn'):\n set(setunion1 | setunion2)\n setunion1._redcon._conn.sunion.assert_called_once_with(\n self.key1, self.key2, self.key3, self.key4\n )\n\n def test_ror(self):\n expected = set(['1', '2', '3', 'a'])\n actual = {'a'} | self.set1\n self.assertEqual(expected, actual)\n\n actual = {'b'} | actual\n expected = set(['1', '2', '3', 'a', 'b'])\n self.assertEqual(expected, actual)\n\n def test_eq_builtin_set(self):\n actual = self.set1 | self.set2 | self.set3\n expected = set(['1', '2', '3', '4', '5'])\n self.assertEqual(expected, actual)\n\n def test_eq_chain(self):\n self.assertEqual(\n self.set1 | self.set2,\n self.set1 | self.set2\n )\n\n def test_ne_builtin_set(self):\n self.assertNotEqual(\n self.set1 | self.set2,\n set(['1', '2', '3'])\n )\n\n def test_ne_chain(self):\n self.assertNotEqual(\n self.set1 | self.set2,\n self.set2 | self.set3\n )\n\n def test_gt_builtin_set(self):\n chain = self.set1 | self.set2 | self.set4\n s = set(['3', '4', '5'])\n self.assertGreater(chain, s)\n\n def test_gt_chain(self):\n self.assertGreater(\n self.set1 | self.set2 | self.set3 | self.set4,\n self.set2 | self.set3\n )\n\n def test_ge_builtin_set(self):\n chain = self.set1 | self.set2 | self.set4\n s = set(['3', '4', '5'])\n self.assertGreaterEqual(chain, s)\n\n def test_ge_chain(self):\n self.assertGreaterEqual(\n self.set1 | self.set2,\n self.set1 | self.set2\n )\n\n def test_lt_builtin_set(self):\n chain = self.set1 | self.set2\n s = set(self.set1 | self.set2 | self.set3 | self.set4)\n self.assertLess(chain, s)\n\n def test_lt_chain(self):\n self.assertLess(\n self.set1 | self.set2,\n self.set1 | self.set2 | self.set3\n )\n\n def test_le_builtin_set(self):\n chain = self.set1 | self.set2\n s = set(self.set1 | self.set2 | self.set3 | self.set4)\n self.assertLessEqual(chain, s)\n\n def test_le_chain(self):\n self.assertLessEqual(\n self.set1 | self.set2,\n self.set1 | self.set2\n )\n\n\nclass TestSetIntersectionChainOper(RedconTestBase):\n\n def setUp(self):\n 
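# [editor's sketch] The mocked pipeline calls in these tests encode the
# pattern under test: a builtin set is materialised into a short-lived
# Redis key (uuid4 name, 5-second TTL) so SINTER/SDIFF can run server-side.
# A redis-py sketch of that pattern; it assumes a reachable Redis server
# and is not part of the test module.
import uuid
import redis

def sinter_with_builtin(conn, redis_key, builtin_set, ttl=5):
    tmp = str(uuid.uuid4())
    pipe = conn.pipeline()
    pipe.sadd(tmp, *builtin_set)  # materialise the python set
    pipe.expire(tmp, ttl)         # let Redis garbage-collect it
    pipe.sinter(redis_key, tmp)
    return pipe.execute()[-1]     # result of the final SINTER

# usage (requires a server): sinter_with_builtin(redis.Redis(), 'set1', {'1', '2'})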
super(TestSetIntersectionChainOper, self).setUp()\n\n self.key1 = 'set1'\n self.key2 = 'set2'\n self.key3 = 'set3'\n\n self.set1 = self.redcon.set(self.key1, ['a', 'b', 'c'])\n self.set2 = self.redcon.set(self.key2, ['b', 'c', 'd'])\n self.set3 = self.redcon.set(self.key3, ['c', 'e', 'f'])\n\n def test_returns_self(self):\n chain = types.SetIntersectionChain(self.redcon, self.set1)\n res = chain & self.set2\n assert res is chain\n\n def test_only_redcon_sets(self):\n res = self.set1 & self.set2 & self.set3\n actual = set(res)\n expected = set(['c'])\n self.assertEqual(expected, actual)\n\n def test_non_set(self):\n chain = types.SetIntersectionChain(self.redcon, self.set1)\n with self.assertRaises(TypeError) as cm:\n chain & 3\n self.assertEqual(\n \"unsupported operand type(s) for &: 'Set' and 'int'\",\n cm.exception.message\n )\n\n def test_redcon_sets_and_non(self):\n non_redcon1 = set(['b', 'c', 'e'])\n non_redcon2 = set(['a', 'b', 'c', 'd'])\n inter_result = self.set1 & non_redcon1 & non_redcon2\n actual = set(inter_result)\n expected = set(['b', 'c'])\n self.assertEqual(expected, actual)\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_non_redcon_sets_temp_created(self, _uuid4):\n non_redcon1 = set(['b', 'c', 'e'])\n non_redcon2 = set(['a', 'b', 'c', 'd'])\n\n _uuid4.side_effect = NotUuid4()\n _pipe = mock.MagicMock()\n\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n inter_result = self.set1 & non_redcon1 & non_redcon2\n set(inter_result)\n\n _pipe.assert_has_calls([\n mock.call.sadd('not_uuid_1', 'b', 'c', 'e'),\n mock.call.expire('not_uuid_1', 5),\n mock.call.sadd('not_uuid_2', 'a', 'b', 'c', 'd'),\n mock.call.expire('not_uuid_2', 5),\n mock.call.sinter(self.key1, 'not_uuid_1', 'not_uuid_2'),\n mock.call.execute()\n ])\n\n def test_setinter_oper_setinter(self):\n non_redcon_set1 = set('bcde')\n non_redcon_set2 = set('cde')\n\n inter_result1 = self.set1 & self.set2 & non_redcon_set1 # {'b', 'c'}\n inter_result2 = non_redcon_set2 & self.set3 # {'c'}\n\n inter_result = inter_result1 & inter_result2\n\n expected = set('c')\n actual = set(inter_result)\n\n self.assertEqual(expected, actual)\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_setinter_oper_setinter_combines(self, _uuid4):\n _uuid4.side_effect = NotUuid4()\n _pipe = mock.MagicMock()\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n\n non_redcon_set1 = set('bcde')\n non_redcon_set2 = set('cde')\n\n inter_result1 = self.set1 & self.set2 & non_redcon_set1 # {'b', 'c'}\n inter_result2 = non_redcon_set2 & self.set3 # {'c'}\n\n inter_result = inter_result1 & inter_result2\n set(inter_result)\n\n _pipe.assert_has_calls([\n mock.call.sadd('not_uuid_1', 'b', 'c', 'd', 'e'),\n mock.call.expire('not_uuid_1', 5),\n mock.call.sadd('not_uuid_2', 'c', 'd', 'e'),\n mock.call.expire('not_uuid_2', 5),\n mock.call.sinter(self.key1, self.key2, self.key3, 'not_uuid_1', 'not_uuid_2'),\n mock.call.execute()\n ])\n\n\nclass TestSetDifferenceChainOper(RedconTestBase):\n\n def setUp(self):\n super(TestSetDifferenceChainOper, self).setUp()\n\n self.key1 = 'set1'\n self.key2 = 'set2'\n self.key3 = 'set3'\n\n self.set1 = self.redcon.set(self.key1, ['a', 'b', 'c'])\n self.set2 = self.redcon.set(self.key2, ['b', 'c', 'd'])\n self.set3 = self.redcon.set(self.key3, ['c', 'e', 'f'])\n\n def test_returns_self(self):\n chain = types.SetDifferenceChain(self.redcon, self.set1)\n res = chain - self.set2\n self.assertIs(chain, res)\n\n 
def test_set_returns_chain(self):\n chain = self.set1 - self.set2\n self.assertIsInstance(chain, types.SetDifferenceChain)\n self.assertEqual(self.redcon, chain._redcon)\n self.assertEqual(self.set1, chain._items[0])\n self.assertEqual(self.set2, chain._items[1])\n\n def test_only_redcon_sets(self):\n res = self.set1 - self.set2 - self.set3\n actual = set(res)\n expected = set(['a'])\n self.assertEqual(expected, actual)\n\n def test_non_set(self):\n chain = types.SetDifferenceChain(self.redcon, self.set1)\n with self.assertRaises(TypeError) as cm:\n chain - 3\n self.assertEqual(\n \"unsupported operand type(s) for -: 'Set' and 'int'\",\n cm.exception.message\n )\n\n def test_redcon_sets_and_non(self):\n non_redcon1 = set(['a', 'd', 'e'])\n non_redcon2 = set(['c', '1', '2'])\n expected = set(['b'])\n diff_result = self.set1 - non_redcon1 - non_redcon2\n actual = set(diff_result)\n self.assertEqual(expected, actual)\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_non_redcon_sets_temp_created(self, _uuid4):\n non_redcon1 = set(['a', 'd', 'e'])\n non_redcon2 = set(['c', '1', '2'])\n\n _uuid4.side_effect = NotUuid4()\n _pipe = mock.MagicMock()\n\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n diff_result = self.set1 - non_redcon1 - non_redcon2\n set(diff_result)\n\n _pipe.assert_has_calls([\n mock.call.sadd('not_uuid_1', 'a', 'd', 'e'),\n mock.call.expire('not_uuid_1', 5),\n mock.call.sadd('not_uuid_2', '1', '2', 'c'),\n mock.call.expire('not_uuid_2', 5),\n mock.call.sdiff(self.key1, 'not_uuid_1', 'not_uuid_2'),\n mock.call.execute()\n ])\n\n def test_setdiff_oper_setdiff(self):\n non_redcon_set1 = set(['b', 'c', 'f'])\n non_redcon_set2 = set(['a', 'b'])\n non_redcon_set3 = set(['a'])\n\n diff_result1 = self.set1 - self.set3 - non_redcon_set1 # {'a', 'e'}\n diff_result2 = non_redcon_set2 - self.set2 # {'c', 'd'}\n\n diff_final = diff_result1 - diff_result2 - non_redcon_set3\n\n expected = set(['e'])\n actual = set(diff_final)\n\n self.assertEqual(expected, actual)\n\n @mock.patch('redcon.types.uuid.uuid4')\n def test_setdiff_oper_setdiff_combines(self, _uuid4):\n _uuid4.side_effect = NotUuid4()\n _pipe = mock.MagicMock()\n with mock.patch.object(self.redcon._conn, 'pipeline'):\n self.redcon._conn.pipeline.return_value = _pipe\n\n non_redcon_set1 = set(['b', 'c', 'f'])\n non_redcon_set2 = set(['a', 'b'])\n non_redcon_set3 = set(['a'])\n\n diff_result1 = self.set1 - self.set3 - non_redcon_set1 # {'a', 'e'}\n diff_result2 = non_redcon_set2 - self.set2 # {'c', 'd'}\n diff_final = diff_result1 - diff_result2 - non_redcon_set3\n set(diff_final)\n\n _pipe.assert_has_calls([\n mock.call.sadd('not_uuid_1', 'b', 'c', 'f'), # non_redcon_set1\n mock.call.expire('not_uuid_1', 5),\n mock.call.sdiffstore('not_uuid_2', self.key1, self.key3, 'not_uuid_1'), # diff_result1\n mock.call.expire('not_uuid_2', 5),\n mock.call.sadd('not_uuid_3', 'a', 'b'), # non_redcon_set2\n mock.call.expire('not_uuid_3', 5),\n mock.call.sdiffstore('not_uuid_4', 'not_uuid_3', self.key2), # diff_result2\n mock.call.expire('not_uuid_4', 5),\n mock.call.sadd('not_uuid_5', 'a'), # non_redcon_set3\n mock.call.expire('not_uuid_5'),\n mock.call.sdiff('not_uuid_2', 'not_uuid_4', 'not_uuid_5'), # diff_final\n mock.call.execute()\n ])\n","sub_path":"tests/types-tests.py","file_name":"types-tests.py","file_ext":"py","file_size_in_byte":24931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"372072183","text":"\n\nfrom 
xai.brain.wordbase.verbs._heat import _HEAT\n\n#class header\nclass _HEATS(_HEAT):\n\tdef __init__(self,): \n\t\t_HEAT.__init__(self)\n\t\tself.name = \"HEATS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"heat\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_heats.py","file_name":"_heats.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24692065","text":"import theano\nimport theano.tensor as T\n\nfX = theano.config.floatX\n\n\ndef cross_covariance(y, z):\n \"\"\"\n from \"Discovering Hidden Factors of Variation in Deep Networks\"\n http://arxiv.org/abs/1412.6583\n \"\"\"\n y_mean = T.mean(y, axis=0, keepdims=True)\n z_mean = T.mean(z, axis=0, keepdims=True)\n y_centered = y - y_mean # (n, i)\n z_centered = z - z_mean # (n, j)\n outer_prod = (y_centered.dimshuffle(0, 1, 'x') *\n z_centered.dimshuffle(0, 'x', 1)) # (n, i, j)\n C = 0.5 * T.sum(T.sqr(T.mean(outer_prod, axis=0)))\n return C\n\n\ndef soft_categorical_crossentropy_i32(pred, target, alpha=0.01):\n \"\"\"\n softer cross-entropy function, where target is treated as not being\n exactly 1, and instead spreading that probability uniformly to other\n classes\n \"\"\"\n assert target.dtype == \"int32\"\n assert target.ndim == 1\n assert pred.dtype == fX\n assert pred.ndim == 2\n nb_class = pred.shape[1]\n t = T.extra_ops.to_one_hot(target, nb_class=nb_class, dtype=pred.dtype)\n t = T.clip(t, alpha / (nb_class.astype(fX) - 1.0), 1 - alpha)\n return T.nnet.categorical_crossentropy(pred, t)\n\n\ndef soft_binary_crossentropy(pred, target, alpha=0.01):\n \"\"\"\n softer binary cross-entropy function, where target is treated as not being\n exactly 1\n \"\"\"\n return T.nnet.binary_crossentropy(pred, T.clip(target, alpha, 1 - alpha))\n","sub_path":"treeano/sandbox/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"239804187","text":"from Var import *\nfrom Function import create\nfrom Function import createBlue\n\nposList = []\nlistStart = []\n\nfor n in range(1, sortingNb + 1):\n posList.append(x)\n listStart.append(x)\n x = x + (width - (sortingNb * posXmulti)) / sortingNb # space the bars so they stay inside the window\n\n\n\nprint(listStart, posList)\n\n\nfor n in range(1, sortingNb + 1): # create the bars\n create(n, 
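# [editor's sketch] cross_covariance() above penalises correlation between
# two groups of hidden units: half the squared Frobenius norm of the mean
# outer product of centred activations. The same quantity in numpy, with
# the reduction made explicit (illustrative names):
import numpy as np

def cross_covariance_np(y, z):
    yc = y - y.mean(axis=0, keepdims=True)  # (n, i)
    zc = z - z.mean(axis=0, keepdims=True)  # (n, j)
    c = np.einsum('ni,nj->ij', yc, zc) / len(y)  # mean outer product
    return 0.5 * np.sum(c ** 2)

rng = np.random.default_rng(0)
print(cross_covariance_np(rng.normal(size=(100, 3)),
                          rng.normal(size=(100, 2))))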
posList[list])\n pygame.display.flip()\n list = list + 1\n\n print (listStart)\n print (posList)\n if listStart == posList:\n list = 0\n for n in range(1, sortingNb + 1): # draw en bleu\n createBlue(n, posList[list])\n pygame.display.flip()\n list = list + 1\n time.sleep(0.15)\n\n run = False\n","sub_path":"SortingAlgo/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253719263","text":"#!E:\\Python\\Python368-64\\python.exe\n# -*- encoding: utf-8 -*-\n'''\n@File : preprocessing.py\n@Time : 2021/04/16 15:02:21\n@Author : Yu Xiao 于潇 \n@Version : 1.0\n@Contact : superyuxiao@icloud.com\n@License : (C)Copyright 2020-2021, Key Laboratory of University Wireless Communication\n Beijing University of Posts and Telecommunications\n@Desc : None\n'''\n\n# ------------------------------ file details ------------------------------ #\n# 数据预处理\n# 去噪 \n# 降维\n# ------------------------------ file details ------------------------------ #\n\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom scipy import signal\nfrom get_scale_csi import get_scale_csi\n\n\ndef read_sample(filepath):\n \"\"\"\n @description : 读取csi样本,并归一化csi\n ---------\n @param : filepath:样本路径\n -------\n @Returns : scale_csi:归一化csi\n -------\n \"\"\"\n\n # 读取样本\n sample = np.load(filepath, allow_pickle=True)\n # 设置csi容器,格式为样本长度(帧数)*子载波数30*发送天线3*接收天线3,复数\n scale_csi = np.empty((len(sample), 30, 3, 3), dtype=complex)\n # 逐帧将csi归一化\n for i in range(len(sample)):\n scale_csi[i] = get_scale_csi(sample[i])\n\n return scale_csi\n\n\ndef butterworth_lowpass(scale_csi, order, wn):\n \"\"\"\n @description : 巴特沃斯低通滤波器\n ---------\n @param : scale_csi:归一化后的csi,order:滤波器阶数,wn:归一化截至角频率\n -------\n @Returns : 低通滤波后的csi幅度\n -------\n \"\"\"\n # 设置csi容器,格式为样本长度(帧数)*子载波数30*发送天线3*接收天线3\n csi = np.empty((len(scale_csi), 30, 3, 3))\n wn = 0.05\n order = 4\n # 引入butter函数\n b, a = signal.butter(order, wn, 'lowpass', analog=False)\n # i发射天线,j接收天线,k子载波序号\n for i in range(3):\n for j in range(3):\n for k in range(30):\n data = abs(scale_csi[:, k, i, j])\n csi[:, k, i, j] = signal.filtfilt(b, a, data, axis=0)\n\n return csi\n\n\ndef PCA_9(csi_abs, n_components, whiten):\n \"\"\"\n @description : PCA,根据天线对分成9组,每组得到一组主成分\n ---------\n @param : csi_abs:csi的幅度矩阵,n_components:主成分数,whiten:是否白化True/False\n -------\n @Returns : 返回csi_pca,主成分矩阵\n -------\n \"\"\"\n\n pca = PCA(n_components=n_components, whiten=whiten)\n # 设置csi容器,格式为样本长度(帧数)*主成分数n_components*发送天线3*接收天线3\n csi_pca = np.empty((len(csi_abs), n_components, 3, 3))\n for i in range(3):\n for j in range(3):\n data = csi_abs[:, :, i, j]\n data = np.reshape(data, (data.shape[0], -1)) # 转换成二维矩阵\n pca.fit(data)\n data_pca = pca.transform(data)\n csi_pca[:, :, i, j] = data_pca[:, :]\n\n return csi_pca\n\n\ndef PCA_1(csi_abs, n_components, whiten):\n \"\"\"\n @description : PCA,30*3*3=270路子载波,得到一组主成分\n ---------\n @param : csi_abs:csi的幅度矩阵,n_components:主成分数(1),whiten:是否白化True/False\n -------\n @Returns : 返回主成分矩阵\n -------\n \"\"\"\n\n pca = PCA(n_components=n_components, whiten=whiten)\n data = csi_abs\n data = np.reshape(data, (data.shape[0], -1)) # 转换成二维矩阵\n pca.fit(data)\n data_pca = pca.transform(data)\n\n return data_pca\n\n\n# 不同人不同位置具有相同的数据处理过程\n# 根据不同工程,对应修改函数代码\ndef ratio(path, feature_number, label):\n csi_data = np.empty((50, feature_number + 1))\n for i in range(50):\n # 样本路径\n filepath = path + str(i) + '.npy'\n # 读取样本\n scale_csi = read_sample(filepath)\n # ! 
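# [editor's sketch] butterworth_lowpass() above applies a zero-phase
# low-pass to each (subcarrier, tx, rx) amplitude stream; note its body
# re-assigns wn = 0.05 and order = 4, so the arguments passed in are
# ignored. Minimal scipy usage of the same filter on one noisy signal:
import numpy as np
from scipy import signal

t = np.linspace(0, 1, 500)
noisy = np.sin(2 * np.pi * 2 * t) + 0.3 * np.random.default_rng(0).normal(size=t.size)
b, a = signal.butter(4, 0.05, 'lowpass', analog=False)
smooth = signal.filtfilt(b, a, noisy)  # forward-backward pass: no phase lag
print(round(float(noisy.std()), 3), round(float(smooth.std()), 3))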
去除前20帧\n scale_csi = scale_csi[20:, :, :, :]\n # print(np.shape(scale_csi))\n ones_csi = np.ones((800, 30, 3, 3))\n ones_csi.dtype = 'float64'\n # ! 截取长度800\n if np.shape(scale_csi)[0] < 800:\n scale_csi = ones_csi\n else:\n scale_csi = scale_csi[:800, :, :]\n # print(np.shape(scale_csi))\n # ! 求csi ratio\n csi_ratio = scale_csi[:, :, 0, 0] / scale_csi[:, :, 0, 1]\n # print(np.shape(csi_ratio))\n # csi ratio phase\n csi_ratio_phase = np.unwrap(np.angle(np.transpose(csi_ratio)))\n # ! 归一化\n # normalizer = MinMaxScaler()\n # csi_normalize = normalizer.fit_transform(csi_ratio_phase)\n # csi_normalize = minmax_scale(csi_ratio_phase,axis=3)\n csi_max = np.max(csi_ratio_phase)\n csi_min = np.min(csi_ratio_phase)\n csi_normalize = (csi_ratio_phase - csi_min) / (csi_max - csi_min)\n # 添加标签\n csi_vector = np.reshape(csi_normalize, (24000,))\n csi_data[i] = np.append(csi_vector, label)\n csi_data.dtype = 'float64'\n # 返回数据\n data = csi_data\n\n return data\n\n\n# 单天线对-30路子载波拼接二维\ndef mul_subcarries(path, feature_number, label):\n csi_data = np.empty((50, feature_number + 1))\n for i in range(50):\n # 样本路径\n filepath = path + str(i + 1) + '.npy'\n # 读取样本\n scale_csi = read_sample(filepath)\n # 低通滤波\n csi_lowpass = butterworth_lowpass(scale_csi, 7, 0.01)\n # 不使用PCA选取子载波\n # 只选取天线对0-0\n csi_pca = csi_lowpass[:, :, 0, 0]\n # 截取长度800,步进10采样\n csi_vector = np.zeros((81, 30))\n if np.shape(csi_pca)[0] < 810:\n csi_empty = np.zeros((810, 30))\n csi_empty[:np.shape(csi_pca)[0]] = csi_pca[:, :]\n csi_vector[:] = csi_empty[::10, :]\n else:\n csi_pca = csi_pca[:809, :]\n csi_vector[:] = csi_pca[::10, :]\n # 添加标签\n csi_vector = np.reshape(csi_vector, (81, 30))\n csi_vector = np.reshape(csi_vector, (2430,))\n csi_data[i] = np.append(csi_vector, label)\n csi_data.dtype = 'float64'\n # 返回数据\n data = csi_data\n\n return data\n","sub_path":"cross_location/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621892957","text":"#BEGIN HEADER\n#############/TyperPyModule Code\\#############\n#END HEADER\n\n#/###################################/\n\n#/############/Typer Global Variables\n\n#/###################################/\n\n\n#/###################################/\n\n#/###########/Typer Functions\n\n#/###################################/\n\ndef getTypingFunctionWithTypedType(_TypedType):\n\t'''\n\t\tGet the Typing Function given the _TypedType\n\t\tIf it is a \"List\", then just call list python method\n\t\telse it is going to be a SysInstanceGetter Functor\n\t'''\n\t\"\"\"\n\t\t#Get the List TypingFunction\n\t\tprint(\"The \\\"List\\\" TypingFunction is : \");\n\t\tTypingFunction=_.getModuleWithName(\"Typer\").getTypingFunctionWithTypedType(\"List\");\n\t\tprint(TypingFunction);\n\t\tprint(\"Typing the list [3] with this returns : \");\n\t\tprint(TypingFunction([3]))\n\n\t\t#Get the Dictionnary TypingFunction\n\t\tprint(\"\\nThe \\\"Dictionnary\\\" TypingFunction is : \");\n\t\tTypingFunction=_.getModuleWithName(\"Typer\").getTypingFunctionWithTypedType(\"Dictionnary\");\n\t\tprint(TypingFunction);\n\t\tprint(\"Typing the dict {\\\"Thing\\\":0} with this returns : \");\n\t\tprint(TypingFunction({\"Thing\":0}));\n\t\"\"\"\n\n\tif _TypedType!=None:\n\t\n\t\tif sys.modules['SysPyModule'].isThisType(\"List\",_TypedType):\n\t\t\treturn list;\n\n\t\telse:\n\t\t\n\t\t\tdef TypingFunctionWithTypedValue(_TypedValue=None):\n\t\t\t\t\n\t\t\t\t#Get the 
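# [editor's sketch] ratio() above min-max normalises the unwrapped phase
# globally, and mul_subcarries() downsamples frames with a stride-10
# slice. Both steps in isolation, with shapes matching the code above:
import numpy as np

x = np.random.default_rng(0).normal(size=(810, 30))
x_norm = (x - x.min()) / (x.max() - x.min())  # global min-max to [0, 1]
x_down = x[:809][::10]                        # rows 0, 10, ..., 800: 81 frames
print(x_norm.min(), x_norm.max(), x_down.shape)  # 0.0 1.0 (81, 30)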
DefaultInstance\n\t\t\t\tDefaultInstance=sys.modules['SysPyModule'].getInstanceWithName(_TypedType);\n\t\t\t\t\n\t\t\t\t#Add the _TypedValue if it is defined\n\t\t\t\tif _TypedValue!=None:\n\t\t\t\t\tDefaultInstance+=_TypedValue;\n\t\t\t\t\t\n\t\t\t\t#Return\n\t\t\t\treturn DefaultInstance;\n\t\t\t\t\n\t\t\t\t\"\"\"\n\t\t\t\tsys.modules['SysPyModule'].getInstanceWithName(\"Functor\")+{\n\t\t\t\t'Args':[_TypedType],\n\t\t\t\t'IsGetter':True,\n\t\t\t\t'Function':sys.modules['SysPyModule'].getInstanceWithName\n\t\t\t\t}+_TypedValued;\n\t\t\t\t\"\"\"\n\n\t\t\treturn TypingFunctionWithTypedValue;","sub_path":"Modules/_drafts/Functor/Modules/Getter/Modules/Typer/drafts/SubScripts/TyperSubScript.py","file_name":"TyperSubScript.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"64198071","text":"import streamlit as st\n\nimport pandas as pd\n\nimport plotly.express as px\n\ndata = pd.read_csv('data/csv')\n\ndef convert(x):\n year, week = x.split('-')\n year = (int(year) - 2020) * 54\n return year + int(week)\n\ndata['week'] = data.year_week.apply(lambda x: convert(x))\n\nhun = data[data.country=='Hungary']\n\nfig = px.line(data_frame = hun, x = 'week', y = 'cumulative_count', color = 'indicator')\n\nst.title('covid-app')\n\ncountry = st.selectbox('Select a country', ['Hungary', 'Belgium'])\n\nst.write(f'The selected country is {country}')\n\nst.plotly_chart(fig)\n","sub_path":"app_first_version.py","file_name":"app_first_version.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"366157386","text":"# -*-coding:Utf-8 -*\n\n\n\"\"\"Ce module contient les fonctions liées au partie.\"\"\"\n\nfrom partie import Partie \nfrom pathlib import Path\nimport pickle\nimport os\n\nfile_name_partie_saved = \"parties.save\"\n\ndef verifiePartieExiste( joueur_name):\n \"\"\" On vérifie si une partie existe le fichier parties.save avec ce nom de joueur.\"\"\"\n my_file = Path(\"./\"+file_name_partie_saved)\n if not my_file.is_file():\n print (\"Le fichier 'partie.save' n'existe pas, il sera créé.\")\n open(file_name_partie_saved, \"wb\").close() \n \n if os.path.getsize(file_name_partie_saved) > 0: \n with open(file_name_partie_saved, \"rb\") as file:\n parties = pickle.load(file)\n #Si le fichier de sauvegarde existe, on vérifie si le nom du joueur est présent\n if parties is not None and isinstance(parties,dict) and joueur_name in parties and isinstance(parties.get(joueur_name),Partie):\n return parties.get(joueur_name)\n else:\n print(\"Aucun partie sauvegardé pour {}\".format(joueur_name))\n else : \n print (\"Le fichier 'partie.save' est vide.\")\n \n \n return False\n\ndef delete(joueur_name):\n \"\"\" supprime une partie terminée\"\"\"\n \n with open(file_name_partie_saved, \"rb\") as file:\n \n score_recupere = pickle.load(file)\n del score_recupere[joueur_name]\n \n with open(file_name_partie_saved,'wb') as file:\n pickle.dump(score_recupere, file) ","sub_path":"partieHelper.py","file_name":"partieHelper.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"396675782","text":"data = '''\n10\n1 2\n2 8\n4 10\n5 9\n6 10\n7 9\n'''\n\ndata = list(map(int, data.split()))\nnew = [[data[0]]]\n\nfor i in range(1, len(data), 2):\n new.append(data[i:i+2])\n\nstart = new[0][0]\nedges = new[1:]\n\nprint(start - len(edges) - 
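# [editor's sketch] The "Completing a Tree" script above relies on the
# fact that a tree on n nodes has exactly n - 1 edges, so a forest with
# k edges needs (n - 1) - k more edges to become a single tree:
def edges_needed(n_nodes, edge_list):
    return (n_nodes - 1) - len(edge_list)

sample = [(1, 2), (2, 8), (4, 10), (5, 9), (6, 10), (7, 9)]
print(edges_needed(10, sample))  # 3, matching the script's output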
1)\n","sub_path":"Bioinformatics Stronghold/Completing a Tree.py","file_name":"Completing a Tree.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"51048163","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom distutils.core import setup\nfrom setuptools import find_packages\nfrom collections import defaultdict\n\nimport version\n\n\ndef get_files(folders):\n result = defaultdict(list)\n\n for folder in folders:\n for dir_, subdirs, files_ in os.walk(folder):\n for fpath in [os.path.join(dir_, f) for f in files_]:\n result[os.path.dirname(fpath)].append(fpath)\n\n return result\n\n\nfile_dirs = 'library', 'library-asm'\ndata_files = [(os.path.join('bin', dir_), fpath) for dir_, fpath in get_files(file_dirs).items()]\n\nsetup(\n name='zxbasic',\n scripts=['zxb.py', 'zxbasm.py', 'zxbpp.py'],\n py_modules=['asm', 'asmlex', 'asmparse', 'keywords', 'optimizer', 'version', 'identityset',\n 'parsetab', 'zxbpptab', 'zxbasmtab', 'basic',\n 'z80', 'zxblex', 'zxbparser', 'zxbpplex', 'zxbasmpplex'],\n packages=find_packages(exclude='test'),\n version=version.VERSION,\n description='The ZX Basic compiler',\n long_description=\"A BASIC to Z80 cpu asm / machine code compiler.\\n\"\n \"It mostly targets ZX Spectrum vintage machine but can be\\n\"\n \"used for other purposes.\",\n author='Jose Rodriguez',\n author_email='boriel@gmail.com',\n url='https://bitbucket.org/boriel/zxbasic',\n download_url='http://boriel.com/files/zxb/zxbasic-%s.tar.gz' % version.VERSION,\n keywords=['compiler', 'zxspectrum', 'BASIC', 'z80'], # arbitrary keywords\n data_files=data_files,\n license='GPL3',\n entry_points={\n 'console_scripts': [\n 'zxb = zxb:main',\n 'zxbasm = zxbasm:main',\n 'zxbpp = zxbpp:entry_point'\n ],\n },\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 5 - Production/Stable',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n install_requires=['six', 'ply'],\n tags=['BASIC', 'zxspectrum', 'compiler', 'z80']\n)\n","sub_path":"pypi_install_script/zxbasic-1.9.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"61543377","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# a = input(\"Digite valor de 'a'\": )\n# a = float(a)\na = 3\n\n# b = input(\"Digite valor de 'b'\": )\n# b = float(b)\nb = -1\n\nx = np.arange(0, 10, 0.01)\nprint(x) # [0. 
0.01 0.02 0.03 .....\nprint(type(x)) # \n\nf = a * x + b\nprint(f) # [-1.000e+00 -9.700e-01 -9.400e-01 -9.100e-01 -8.800e-01 -8.500e-01\nprint(type(f)) # \n\nplt.plot(x, f,\n color=\"y\",\n marker=\"v\",\n linestyle=\"-.\")\nplt.xlabel(\"x\")\nplt.ylabel(\"f\")\nplt.title(\"Função Linear\")\nplt.grid()\nplt.savefig(\"plot_funcao_linear_example.png\")\nplt.show()","sub_path":"05_graficos_matlibplot/01_funcao_linear_example.py","file_name":"01_funcao_linear_example.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"473168781","text":"\"\"\"Various assemblers to discretize boundary operators.\"\"\"\n\n\ndef _create_assembler(domain, dual_to_range, identifier, parameters, device_interface):\n \"\"\"Create assembler based on string.\"\"\"\n from bempp.core.singular_assembler import SingularAssembler\n from bempp.core.dense_assembler import DenseAssembler\n from bempp.core.sparse_assembler import SparseAssembler\n from bempp.core.dense_evaluator import DenseEvaluatorAssembler\n from bempp.core.dense_multitrace_evaluator import DenseMultitraceEvaluatorAssembler\n\n if identifier == \"only_singular_part\":\n return SingularAssembler(domain, dual_to_range, parameters)\n if identifier == \"dense\":\n return DenseAssembler(domain, dual_to_range, parameters)\n if identifier == \"default_nonlocal\":\n if device_interface.type == 'gpu':\n return DenseEvaluatorAssembler(domain, dual_to_range, parameters)\n return DenseAssembler(domain, dual_to_range, parameters)\n if identifier == \"sparse\":\n return SparseAssembler(domain, dual_to_range, parameters)\n if identifier == \"dense_evaluator\":\n return DenseEvaluatorAssembler(domain, dual_to_range, parameters)\n if identifier == \"multitrace_evaluator\":\n return DenseMultitraceEvaluatorAssembler(domain, dual_to_range, parameters)\n\n\nclass AssemblerInterface(object):\n \"\"\"Default Assembler interface object.\"\"\"\n\n def __init__(\n self,\n domain,\n dual_to_range,\n assembler,\n device_interface,\n precision,\n parameters=None,\n ):\n \"\"\"Initialize assembler based on assembler_type string.\"\"\"\n import bempp.api as _api\n\n self._domain = domain\n self._dual_to_range = dual_to_range\n self._parameters = _api.assign_parameters(parameters)\n self._device_interface = device_interface\n self._precision = precision\n\n if not isinstance(assembler, str):\n self._implementation = assembler\n else:\n self._implementation = _create_assembler(\n domain, dual_to_range, assembler, self.parameters,\n device_interface\n )\n\n @property\n def domain(self):\n \"\"\"Return domain space.\"\"\"\n return self._domain\n\n @property\n def dual_to_range(self):\n \"\"\"Return dual to range space.\"\"\"\n return self._dual_to_range\n\n @property\n def parameters(self):\n \"\"\"Return parameters.\"\"\"\n return self._parameters\n\n def assemble(self, operator_descriptor, *args, **kwargs):\n \"\"\"Assemble the operator.\"\"\"\n return self._implementation.assemble(\n operator_descriptor,\n self._device_interface,\n self._precision,\n *args,\n **kwargs\n )\n\n\nclass AssemblerBase(object):\n \"\"\"Base class for assemblers.\"\"\"\n\n def __init__(self, domain, dual_to_range, parameters=None):\n \"\"\"Instantiate the base class.\"\"\"\n import bempp.api as api\n\n self._domain = domain\n self._dual_to_range = dual_to_range\n self._parameters = api.assign_parameters(parameters)\n\n @property\n def domain(self):\n \"\"\"Return domain.\"\"\"\n return self._domain\n\n @property\n def 
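# [editor's sketch] _create_assembler() above is a string-keyed factory
# with one device-dependent branch; unrecognised identifiers currently
# fall through and return None. A dict-based skeleton of the same
# dispatch that fails loudly instead (class names are stand-ins, not
# bempp's real assemblers):
class DenseAssembler:
    def __init__(self, *args):
        self.args = args

class SparseAssembler:
    def __init__(self, *args):
        self.args = args

_ASSEMBLERS = {'dense': DenseAssembler, 'sparse': SparseAssembler}

def create_assembler(identifier, *args):
    try:
        return _ASSEMBLERS[identifier](*args)
    except KeyError:
        raise ValueError('unknown assembler: %r' % identifier) from None

print(type(create_assembler('dense', 'domain', 'dual')).__name__)  # DenseAssembler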
dual_to_range(self):\n \"\"\"Return dual to range.\"\"\"\n return self._dual_to_range\n\n @property\n def parameters(self):\n \"\"\"Return parameters.\"\"\"\n return self._parameters\n\n def assemble(self, operator_descriptor, *args, **kwargs):\n \"\"\"Assemble the operator.\"\"\"\n raise NotImplementedError(\"Needs to be implemented by derived class.\")\n","sub_path":"bempp/api/assembly/assembler.py","file_name":"assembler.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"166775856","text":"# Python web scraping lecture 11\n# https://nadocoding.tistory.com/10\n# Web scraping a dynamic page: Google Play top movies\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = 'https://play.google.com/store/movies/top'\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',\n 'Accept-Language':'ko-KR,ko'}\nres = requests.get(url=url, headers=headers)\nres.raise_for_status()\n\nsoup = BeautifulSoup(res.text, 'lxml')\nmovies = soup.find_all('div', attrs={'class':'ImZGtf mpg5gc'})\nprint(len(movies))\n\n# with open('google_movie.html', 'w', encoding='utf8') as f:\n# # f.write(res.text)\n# f.write(soup.prettify()) # pretty-print the html\nfor movie in movies:\n title = movie.find('div', attrs={'class':'WsMG1c nnK0zc'}).get_text()\n print(title)","sub_path":"ExPyT0113.py","file_name":"ExPyT0113.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"438879064","text":"# coding: utf-8\n'''\n-----------------------------------------------------------------------------\nCopyright 2015 Esri\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n-----------------------------------------------------------------------------\n\n==================================================\nMaritimeDecisionAidToolsTestSuite.py\n--------------------------------------------------\nrequirements:\n* ArcGIS Desktop 10.X+ or ArcGIS Pro 1.X+\n* Python 2.7 or Python 3.4\nauthor: ArcGIS Solutions\ncompany: Esri\n==================================================\ndescription:\nThis test suite collects all of the Maritime Decision Aid Tools toolbox test cases:\n* FindSubmarinesTestCase.py\n* FarthestOnCircleTestCase.py\n* SubDepthRestrictionSuitabilityTestCase.py\n* SubSpecificationsTestCase.py\n* VisibilityRangeAtSeaTestCase.py\n==================================================\nhistory:\n2/29/2016 - JH - creation\n==================================================\n'''\n\nimport logging\nimport unittest\nimport Configuration\n\nTestSuite = unittest.TestSuite()\n\ndef getMaritimeTestSuite():\n ''' Run the Maritime Decision Aid Tools tests (Desktop and Pro)'''\n \n findSubmarineProTests = ['test_find_submarine_pro']\n findSubmarineDesktopTests = ['test_find_submarine_desktop']\n subDepthRestrictionSuitabilityProTests = ['test_sub_depth_restriction_suitability_pro']\n subDepthRestrictionSuitabilityDesktopTests = ['test_sub_depth_restriction_suitability_desktop']\n subSpecificationsProTests = ['test_sub_specifications_pro']\n subSpecificationsDesktopTests = ['test_sub_specifications_desktop']\n visibilityRangeAtSeaProTests = ['test_visibility_range_at_sea_pro']\n visibilityRangeAtSeaDesktopTests = ['test_visibility_range_at_sea_desktop']\n farthestOnCircleProTests = ['test_farthest_on_circle_pro']\n farthestOnCircleDesktopTests = ['test_farthest_on_circle_desktop']\n \n if Configuration.DEBUG == True: print(\" MaritimeDecisionAidToolsTestSuite.getMaritimeTestSuite\")\n \n if Configuration.Platform == \"DESKTOP\":\n Configuration.Logger.info(\"Maritime Decision Aid Tools Desktop tests\")\n addFindSubmarineTests(findSubmarineDesktopTests)\n addSubSpecificationsTests(subSpecificationsDesktopTests)\n addSubDepthRestrictionTests(subDepthRestrictionSuitabilityDesktopTests)\n addVisibilityRangeTests(visibilityRangeAtSeaDesktopTests)\n addFarthestOnCircleTests(farthestOnCircleDesktopTests)\n \n else:\n Configuration.Logger.info(\"Maritime Decision Aid Tools Pro tests\")\n addFindSubmarineTests(findSubmarineProTests)\n addSubSpecificationsTests(subSpecificationsProTests)\n addSubDepthRestrictionTests(subDepthRestrictionSuitabilityProTests)\n addVisibilityRangeTests(visibilityRangeAtSeaProTests)\n addFarthestOnCircleTests(farthestOnCircleProTests)\n\n return TestSuite\n\n\ndef addFindSubmarineTests(inputTestList):\n if Configuration.DEBUG == True: print(\" MaritimeDecisionAidToolsTestSuite.addFindSubmarineTests\")\n from . import FindSubmarinesTestCase\n for test in inputTestList:\n print(\"adding test: \" + str(test))\n Configuration.Logger.info(test)\n TestSuite.addTest(FindSubmarinesTestCase.FindSubmarinesTestCase(test))\n \ndef addSubDepthRestrictionTests(inputTestList):\n if Configuration.DEBUG == True: print(\" MaritimeDecisionAidToolsTestSuite.addSubDepthRestrictionTests\")\n from . import SubDepthRestrictionSuitabilityTestCase\n for test in inputTestList:\n print(\"adding test: \" + str(test))\n Configuration.Logger.info(test)\n TestSuite.addTest(SubDepthRestrictionSuitabilityTestCase.SubDepthRestrictionSuitabilityTestCase(test))\n \ndef addSubSpecificationsTests(inputTestList):\n if Configuration.DEBUG == True: print(\" MaritimeDecisionAidToolsTestSuite.addSubSpecificationsTests\")\n from . import SubSpecificationsTestCase\n for test in inputTestList:\n print(\"adding test: \" + str(test))\n Configuration.Logger.info(test)\n TestSuite.addTest(SubSpecificationsTestCase.SubSpecificationsTestCase(test))\n\ndef addVisibilityRangeTests(inputTestList):\n if Configuration.DEBUG == True: print(\" MaritimeDecisionAidToolsTestSuite.addVisibilityRangeTests\")\n from . import VisibilityRangeAtSeaTestCase\n for test in inputTestList:\n print(\"adding test: \" + str(test))\n Configuration.Logger.info(test)\n TestSuite.addTest(VisibilityRangeAtSeaTestCase.VisibilityRangeAtSeaTestCase(test))\n \ndef addFarthestOnCircleTests(inputTestList):\n if Configuration.DEBUG == True: print(\" MaritimeDecisionAidToolsTestSuite.addFarthestOnCircleTests\")\n from . 
import FarthestOnCircleTestCase\n for test in inputTestList:\n print(\"adding test: \" + str(test))\n Configuration.Logger.info(test)\n TestSuite.addTest(FarthestOnCircleTestCase.FarthestOnCircleTestCase(test))\n \n \n ","sub_path":"utils/test/suitability_tests/MaritimeDecisionAidToolsTestSuite.py","file_name":"MaritimeDecisionAidToolsTestSuite.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"220145719","text":"import tensorflow as tf\r\n\r\nx_train = [1, 2, 3]\r\ny_train = [1, 2, 3]\r\n\r\n# define the variables\r\nW = tf.Variable(tf.random_normal([1]), name='weight')\r\nb = tf.Variable(tf.random_normal([1]), name='bias')\r\n\r\n# implement the H(x) = Wx + b model and define the cost (loss function)\r\nhypothesis = x_train * W + b\r\ncost = tf.reduce_mean(tf.square(hypothesis - y_train))\r\n\r\n# define the cost-minimization algorithm\r\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)\r\ntrain = optimizer.minimize(cost)\r\n\r\n# initialize variables and prepare the session\r\nsess = tf.Session()\r\nsess.run(tf.global_variables_initializer())\r\n\r\n# Fit the line\r\nfor step in range(2001):\r\n sess.run(train)\r\n if step % 20 == 0:\r\n print(step, sess.run(cost), sess.run(W), sess.run(b))","sub_path":"자율과제형/4 머신러닝 개인 공부/tf_2.py","file_name":"tf_2.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"124354004","text":"#!/usr/bin/env python\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nwith open('requirements.txt') as f:\n required_packages = f.readlines()\n\nsetup(name='hivemind',\n version='0.1',\n description='A fork of Bees With Machine Guns to make it useful for more arbitrary tasks',\n author='Oscar Carlsson',\n author_email='',\n url='http://github.com/GraveRaven/hivemind',\n license='MIT',\n packages=['hivemind'],\n scripts=['hivemind'],\n install_requires=required_packages,\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Testing :: Traffic Generation',\n 'Topic :: Utilities',\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"357955530","text":"import os\nfrom objects import (\n\tCar as Car)\nfrom methods import (\n\tcarCheck as carCheck,\n\tmenu as menu,\n\tidSearch as idSearch,\n\treadInList as readInList,\n\treWrite as reWrite)\nimport re\n\n\ndef main():\n\n\tblockZero = []\n\n\tblockOne = []\n\n\tblockTwo = []\n\n\tblockThree = []\n\n\tblockFour = []\n\n\tblockFive = []\n\n\tblockSix = []\n\n\tblockSeven = []\n\n\tblockEight = []\n\n\tblockNine = []\n\n\tlistZero = []\n\n\tlistOne = []\n\n\tlistTwo = []\n\n\tlistThree = []\n\n\tlistFour = []\n\n\tlistFive = []\n\n\tlistSix = []\n\n\tlistSeven = []\n\n\tlistEight = []\n\n\tlistNine = []\n\n\toverflowBuffZero = []\n\n\toverflowBuffOne = []\n\n\toverflowBuffTwo = []\n\n\toverflowBuffThree = []\n\n\toverflowBuffFour = []\n\n\toverflowBuffFive = []\n\n\toverflowBuffSix = []\n\n\toverflowBuffSeven = []\n\n\toverflowBuffEight = []\n\n\toverflowBuffNine = []\n\n\tblockLen = 5\n\n\t#blockExample = [listExample, OverflowBuffExample]\n\n\tlistBlocks = [\n\t\tblockZero, \n\t\tblockOne, \n\t\tblockTwo, 
\n\t\tblockThree, \n\t\tblockFour, \n\t\tblockFive, \n\t\tblockSix, \n\t\tblockSeven, \n\t\tblockEight, \n\t\tblockNine\n\t\t]\n\n\tlistLists = [\n\t\tlistZero, \n\t\tlistOne, \n\t\tlistTwo, \n\t\tlistThree, \n\t\tlistFour, \n\t\tlistFive, \n\t\tlistSix, \n\t\tlistSeven, \n\t\tlistEight, \n\t\tlistNine\n\t\t]\n\n\tlistOverflowBuff = [\n\t\toverflowBuffZero, \n\t\toverflowBuffOne, \n\t\toverflowBuffTwo, \n\t\toverflowBuffThree, \n\t\toverflowBuffFour,\n\t \toverflowBuffFive, \n\t \toverflowBuffSix, \n\t \toverflowBuffSeven, \n\t \toverflowBuffEight, \n\t \toverflowBuffNine\n\t \t]\n\n\tfor i in range(len(listBlocks)):\n\n\t\tlistBlocks[i].append(listLists[i])\n\n\t\tlistBlocks[i].append(listOverflowBuff[i])\n\n\t\tfor j in range(blockLen): \n\n\t\t\tlistLists[i].append(None)\n\n\n\t#print(listBlocks)\n\n\tinfile = \"directCar.txt\"\n\n\toutfile = \"carparkMod.txt\"\n\n\tcounter = 0\n\n\twith open(infile, \"r+\") as f:\n\n\t\tfor row in f:\n\n\t\t\tcounter +=1\n\n\twith open(infile, \"r+\") as f:\n\n\t\tfor row in f:\n\n\t\t\tfor i in range(len(listBlocks)):\n\n\t\t\t\t\tfor j in range(counter):\n\n\t\t\t\t\t\tif j < 5:\n\n\t\t\t\t\t\t\tif row[3] == str(i) and listLists[i][j] is None:\n\n\n\t\t\t\t\t\t\t\tlistLists[i][j] = row\n\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\tif row[3] == str(i) and listLists[i][4] is not None and j >= 5 :\n\n\t\t\t\t\t\t\tlistOverflowBuff[i].append(row)\n\n\t\t\t\t\t\t\tbreak\n\n\tf.close()\n\n\n\tmenu()\n\n\tchoice = (input(\"Your choice:\"))\n\n\tprint(\"\\n\")\n\n\tif choice == \"1\":\n\n\t\t'''Input data '''\n\n\t\tif not os.path.exists(infile):\n\n\t\t\tcarstore = open(infile, \"w+\")\n\n\t\telse:\n\n\t\t\tcarstore = open(infile, \"a\")\n\n\t\tcarInCarPark = Car.carInput(infile)\n\n\t\tgetCarId = str(carInCarPark.getCarID())\n\n\t\tfor i in range(len(listBlocks)):\n\n\t\t\tfor j in range(len(listLists[i])):\n\n\t\t\t\tif getCarId[-1] == str(i) and listLists[i][j] == None :\n\n\t\t\t\t\tlistLists[i][j] = str(carInCarPark)\n\n\t\t\t\t\tcarstore.write(str(listLists[i][j]))\n\n\t\t\t\t\tbreak\n\n\t\t\t\telif getCarId[-1] == str(i) and listLists[i][4] != None:\n\n\t\t\t\t\tlistOverflowBuff[i].append(str(carInCarPark))\n\n\t\t\t\t\tcarstore.write(str(listOverflowBuff[i][-1]))\n\n\t\t\t\t\tbreak\n\n\t\tcarstore.close()\n\n\t\tprint(\"\\n\")\n\n\t\tlines = readInList(infile)\n\n\t\tlines.sort()\n\n\t\treWrite(infile, outfile, lines)\n\n\t\tmain()\n\n\telif choice == \"2\":\n\n\t\t'''Search specific data '''\n\n\t\tresult = 0\n\n\t\tcarIdSearch = input('Enter the ID : ')\n\n\t\tblockId = int(carIdSearch[3])\n\n\t\tfor i in range (len(listLists[blockId])):\n\n\t\t\tif listLists[blockId][i] is not None and carIdSearch in listLists[blockId][i]:\n\n\t\t\t\tprint(listLists[blockId][i])\n\n\t\t\t\tresult = 1\n\n\t\t\t\tbreak\n\n\t\tfor i in range(len(listOverflowBuff[blockId])):\n\n\t\t\tif carIdSearch in listOverflowBuff[blockId][i]:\n\n\t\t\t\tprint(listOverflowBuff[blockId][i])\n\n\t\t\t\tresult = 1\n\n\t\t\t\tbreak\n\n\t\tif result == 0:\n\n\t\t\tprint(\"No match found.\")\n\n\t\tmain()\n \t\n\telif choice == \"3\":\n\n\t\t'''Read all data'''\n\n\n\t\tfor i in range(len(listBlocks)):\n\n\t\t\tprint(\"Block #\", i, \"\\n\")\n\t\t\t\n\t\t\tfor j in range(blockLen):\n\n\t\t\t\tif listLists[i][j] != None:\n\n\t\t\t\t\tprint(listLists[i][j])\n\n\t\t\tif listOverflowBuff[i] != []:\n\n\t\t\t\tfor j in range(len(listOverflowBuff[i])):\n\t\t\t\t\n\t\t\t\t\tprint(\"Overflow buffer #{}: {}\".format(i, listOverflowBuff[i][j]))\n\n\t\t\tprint(\"=================\")\n\n\t\tprint(\"\\n\")\n\n\t\tlines = 
readInList(infile)\n\n\t\tlines.sort()\n\n\t\treWrite(infile, outfile, lines)\n\n\t\tmain()\t\t\n\n\telif choice == \"4\":\n\n\t\t''' Modify data ''' \n\t\t\n\t\tcarIdSearch = input('Enter the ID : ')\n\n\t\tblockId = int(carIdSearch[3])\n\n\t\tmodlist = []\n\n\t\tlines = []\n\n\t\tresult = 0\n\n\t\tfor i in range (len(listLists[blockId])):\n\n\t\t\tif listLists[blockId][i] is not None and carIdSearch in listLists[blockId][i]:\n\n\t\t\t\tprint(listLists[blockId][i])\n\n\t\t\t\toldField = input(\"Field to modify: \")\n\t\t\t\t\t\n\t\t\t\tnewField = input(\"New data: \")\n\t\t\t\t\t\n\t\t\t\tlistLists[blockId][i] = listLists[blockId][i].replace(oldField, newField )\n\n\t\t\t\tresult = 1 \n\n\t\t\t\tbreak\n\n\t\tfor i in range(len(listOverflowBuff[blockId])):\n\n\t\t\tif carIdSearch in listOverflowBuff[blockId][i]:\n\n\t\t\t\tprint(listOverflowBuff[blockId][i])\n\n\t\t\t\toldField = input(\"Field to modify: \")\n\t\t\t\t\t\n\t\t\t\tnewField = input(\"New data: \")\n\t\t\t\t\t\n\t\t\t\tlistOverflowBuff[blockId][i] = listOverflowBuff[blockId][i].replace(oldField, newField )\n\n\t\t\t\tresult = 1 \n\n\t\t\t\tbreak\n\n\t\tif result == 0:\n\n\t\t\tprint(\"No match found.\")\n\n\t\telif result == 1:\n\n\t\t\twith open(infile, \"w+\") as f1:\n\n\t\t\t\tfor i in range(len(listBlocks)):\n\n\t\t\t\t\tfor j in range(blockLen):\n\n\t\t\t\t\t\tif listLists[i][j] is not None:\n\t\t\t\t\t\n\t\t\t\t\t\t\tf1.write(str(listLists[i][j]))\n\n\t\t\t\t\tfor k in range(len(listOverflowBuff[i])):\n\t\t\t\t\n\t\t\t\t\t\tf1.write(str(listOverflowBuff[i][k]))\n\n\t\t\tf1.close()\n\n\t\treWrite(infile, outfile, lines)\n\n\t\tmain()\n\n\telif choice == \"5\":\n\n\t\t''' Delete data '''\n\n\t\tcarIdSearch = input('Enter the ID : ')\n\n\t\tblockId = int(carIdSearch[3])\n\n\t\tmodlist = []\n\n\t\tlines = []\n\n\t\tresult = 0\n\n\t\t'''Checking if car is in main buffer'''\n\n\t\tfor i in range (len(listLists[blockId])):\n\n\t\t\tif listLists[blockId][i] is not None and carIdSearch in listLists[blockId][i]:\n\n\t\t\t\tprint(listLists[blockId][i])\n\n\t\t\t\tdeleteDecision = input(\"Do you want to delete the record? Y/N: \")\n\n\t\t\t\tif deleteDecision == 'y' or deleteDecision == 'Y':\n\t\t\t\t\t\t\n\t\t\t\t\tlistLists[blockId][i] = None\n\n\t\t\t\t\tif listOverflowBuff[blockId] != []:\n\n\t\t\t\t\t\t# promote the oldest overflow record into the freed main slot\n\t\t\t\t\t\tlistLists[blockId][i] = listOverflowBuff[blockId][0]\n\n\t\t\t\t\t\tlistOverflowBuff[blockId].pop(0)\n\n\t\t\t\t\t\tresult = 1\n\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tresult = 1 \n\n\t\t\t\t\tbreak\n\n\t\t\t\telif deleteDecision == 'n' or deleteDecision == 'N':\n\t\t\t\t\n\t\t\t\t\tmain()\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tprint(\"Wrong button pressed. 
Returning to main menu.\")\n\n\t\t\t\t\tmain()\n\n\t\t'''Checking if car is in overflow buffer'''\n\n\t\tfor i in range(len(listOverflowBuff[blockId])):\n\n\t\t\tif carIdSearch in listOverflowBuff[blockId][i]:\n\n\t\t\t\tprint(listOverflowBuff[blockId][i])\n\n\t\t\t\tdeleteDecision = input(\"Do you want to delete the record? Y/N: \")\n\n\t\t\t\tif deleteDecision == 'y' or deleteDecision == 'Y':\n\t\t\t\t\t\t\n\t\t\t\t\tlistOverflowBuff[blockId].pop(i)\n\n\t\t\t\t\tresult = 1 \n\n\t\t\t\t\tbreak\n\n\t\t\t\telif deleteDecision == 'n' or deleteDecision == 'N':\n\t\t\t\t\n\t\t\t\t\tmain()\n\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tprint(\"Wrong button pressed. Returning to main menu.\")\n\n\t\t\t\t\tmain()\n\n\t\tif result == 0:\n\n\t\t\tprint(\"No match found.\")\n\n\t\telif result == 1:\n\n\t\t\twith open(infile, \"w+\") as f1:\n\n\t\t\t\tfor i in range(len(listBlocks)):\n\n\t\t\t\t\tfor j in range(blockLen):\n\n\t\t\t\t\t\tif listLists[i][j] is not None:\n\t\t\t\t\t\n\t\t\t\t\t\t\tf1.write(str(listLists[i][j]))\n\n\t\t\t\t\tfor k in range(len(listOverflowBuff[i])):\n\t\t\t\t\n\t\t\t\t\t\tf1.write(str(listOverflowBuff[i][k]))\n\n\t\t\tf1.close()\n\n\t\tlistBlocks = None\n\n\t\tmain()\n\n\telif choice == \"6\":\n\n\t\t''' Exit ''' \n\t\t\n\t\tprint(\"Bye!\")\n\n\t\texit()\n\n\telif choice == \"0\":\n\n\t\tprint(\"Programming God's mode is not implemented yet.\")\n\n\t\tmain()\n\n\telse:\n\t\t\n\t\tprint(\"Wrong button pressed.\")\n\t\t\n\t\tmain()\n\nif __name__ == '__main__':\n\tmain()\n\t\n","sub_path":"direct_txt.py","file_name":"direct_txt.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89582297","text":"#! /usr/bin/python\n# Requires wikipedia.py, wiki2plain.py, and python yaml\nimport wikipedia\nimport os\nimport unicodedata\nimport random\n\n\nclass Face:\n def __init__(self, topic):\n self.content = ''\n self.topic = topic\n\n if self.topic:\n self.content = self.get_text()\n\n def set_topic(self, new_topic):\n self.topic = new_topic\n topic_path = os.path.join(\"content\", \"{}.txt\".format(new_topic))\n if os.path.exists(topic_path):\n with open(topic_path, \"r\") as f:\n self.content = f.read()\n else:\n self.content = self.get_text()\n\n def get_text(self):\n print('get_text: {}'.format(self.topic))\n assert self.topic\n queries = [self.topic, self.topic[:len(self.topic) // 2]]\n tokens = self.topic.split()\n if len(tokens) > 1:\n queries.append(tokens[0])\n queries.append(tokens[-1])\n\n topic_results = list()\n for query in queries:\n topic_results.extend(wikipedia.search(query))\n print('topic_results size:', len(topic_results))\n try:\n topic_results = wikipedia.search(self.topic)\n for query in random.sample(topic_results, min(9, len(topic_results))):\n self.content += wikipedia.page(query).content\n except wikipedia.exceptions.DisambiguationError:\n self.content += self.topic + ' can mean many things but to me it is'\n except wikipedia.exceptions.PageError:\n self.content += self.topic + ' is sometimes hard to find'\n return self.content\n\n def research_topic(self, topic, logger):\n content = \"\"\n\n # do a wikipedia search for the topic\n topic_results = wikipedia.search(topic)\n\n logger(\" Search returned %d articles on %s\" % (len(topic_results), topic))\n for i in range(len(topic_results)):\n try:\n data = wikipedia.page(topic_results[i]).content\n if type(data) is str:\n content += data\n elif type(data) is unicode:\n content += unicodedata.normalize('NFKD', data).encode('ascii', 'ignore')\n except:\n pass\n\n return content\n\n def fully_research_topic(self, topic, logger):\n content = \"\"\n\n content += self.research_topic(topic, logger)\n\n topic_split = topic.split()\n if len(topic_split) > 1:\n for i in range(len(topic_split)):\n try:\n # Skip words that are fewer than three characters\n if len(topic_split[i]) < 3:\n continue\n\n content += self.research_topic(topic_split[i], logger)\n\n except wikipedia.exceptions.DisambiguationError:\n content += topic + ' can mean many things but to me it is'\n except wikipedia.exceptions.PageError:\n content += topic + ' is sometimes hard to find'\n\n return content\n\n def parse_text(self):\n phrases = 
[]\n words = self.content.split()\n # function to take a blob and parse out apropriately sized snippets\n for index in range(0, len(words) - 1):\n if self.topic.lower()[:len(self.topic) // 4] in words[index].lower() or self.topic.split()[-1].lower() in \\\n words[index].lower():\n cur_word = words[index]\n phrase = ''\n if index > 5:\n i = index - random.randint(0, 5)\n else:\n i = index\n counter = 0\n while cur_word.isalpha() and counter < 6:\n try:\n phrase = phrase + words[i].lower() + ' '\n i += 1\n cur_word = words[i]\n except:\n cur_word = '...'\n counter += 1\n if len(phrase.split()) > 3:\n temp = ''\n for char in phrase:\n if char.isalpha() or char.isspace():\n temp += char\n phrase = temp\n other_words = [\n 'using only my', 'forever!', 'because', 'for once in your life', 'until',\n 'Great Job!', ', but in reality', 'is wrong!', 'is #1', 'never dies', 'is really',\n 'might be', 'or not', 'better known as', 'the worst', 'kinda feels like',\n ', right?', '', ', WTF!', ', for realz', ', tru fact', 'in the feels',\n 'probably the best', '?']\n phrase += random.choice(other_words)\n phrases.append(phrase)\n phrases = list(set(phrases))\n return phrases\n\n def parse_bullets(self):\n bullets = []\n sentences = self.content.split('.')\n for ea in sentences:\n if len(ea) in range(50, 75) and \"\\n\" not in ea and \"=\" not in ea:\n bullets.append(ea)\n return bullets\n\n def get_bullets(self, min_count):\n final_bullets = []\n while len(final_bullets) < min_count:\n bullets = self.parse_bullets()\n for b in bullets:\n final_bullets.append(b.strip())\n return final_bullets\n\n def get_titles(self, min_count):\n # function to choose short headlines for the top of slides\n headlines = []\n while len(headlines) < min_count:\n phrases = self.parse_text()\n for p in phrases:\n headlines.append(p.strip())\n return headlines\n","sub_path":"content_troll.py","file_name":"content_troll.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"376262624","text":"osoba = {\n \"imie\": \"Marek\",\n \"nazwisko\": \"Banach\",\n \"wiek\": 25,\n \"hobby\": [\"programowanie\",\"wycieczki\"],\n \"student\": True,\n \"telefon\":{\"stacjonarny\":\"2233\",\"komorkowy\":\"7788\"} }\nprint(osoba)\nprint()\nprint(osoba[\"nazwisko\"])\nprint()\nprint(*osoba[\"hobby\"], sep=\", \")\nprint()\nosoba[\"nazwisko\"]=\"Nowak\"\nosoba[\"student\"]=False\nosoba[\"płeć\"]=\"mężczyzna\"\nosoba[\"hobby\"].append(\"rower\")\nosoba[\"telefon\"][\"służbowy\"]=3131\nfor f in osoba:\n print(f,\":\", osoba[f])\nprint()","sub_path":"08-DataStructures/dict9.py","file_name":"dict9.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"423925577","text":"#!/usr/bin/env python\n\nfrom carddeck import CardDeck\n\nclass Dog():\n def bark(self):\n print(\"Woof! Woof!\")\n\nclass JokerDeck(CardDeck, Dog):\n\n def _create_deck(self):\n super()._create_deck()\n for i in range(1, 3):\n joker = str(i), 'Joker'\n self._cards.append(joker)\n","sub_path":"jokerdeck.py","file_name":"jokerdeck.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216423658","text":"# Copyright (c) ZenML GmbH 2023. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Endpoint definitions for pipeline run secrets.\"\"\"\nfrom typing import Optional\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends, Security\n\nfrom zenml.constants import API, SECRETS, VERSION_1\nfrom zenml.enums import PermissionType\nfrom zenml.models.page_model import Page\nfrom zenml.models.secret_models import (\n SecretFilterModel,\n SecretResponseModel,\n SecretUpdateModel,\n)\nfrom zenml.zen_server.auth import AuthContext, authorize\nfrom zenml.zen_server.exceptions import error_response\nfrom zenml.zen_server.utils import (\n handle_exceptions,\n make_dependable,\n zen_store,\n)\n\nrouter = APIRouter(\n prefix=API + VERSION_1 + SECRETS,\n tags=[\"secrets\"],\n responses={401: error_response},\n)\n\n\n@router.get(\n \"\",\n response_model=Page[SecretResponseModel],\n responses={401: error_response, 404: error_response, 422: error_response},\n)\n@handle_exceptions\ndef list_secrets(\n secret_filter_model: SecretFilterModel = Depends(\n make_dependable(SecretFilterModel)\n ),\n auth_context: AuthContext = Security(\n authorize, scopes=[PermissionType.READ]\n ),\n) -> Page[SecretResponseModel]:\n \"\"\"Gets a list of secrets.\n\n Args:\n secret_filter_model: Filter model used for pagination, sorting,\n filtering\n auth_context: Authentication context.\n\n Returns:\n List of secret objects.\n \"\"\"\n secrets = zen_store().list_secrets(secret_filter_model=secret_filter_model)\n\n # Remove secrets from the response if the user does not have write\n # permissions.\n if PermissionType.WRITE not in auth_context.permissions:\n for secret in secrets.items:\n secret.remove_secrets()\n\n return secrets\n\n\n@router.get(\n \"/{secret_id}\",\n response_model=SecretResponseModel,\n responses={401: error_response, 404: error_response, 422: error_response},\n)\n@handle_exceptions\ndef get_secret(\n secret_id: UUID,\n auth_context: AuthContext = Security(\n authorize, scopes=[PermissionType.READ]\n ),\n) -> SecretResponseModel:\n \"\"\"Gets a specific secret using its unique id.\n\n Args:\n secret_id: ID of the secret to get.\n auth_context: Authentication context.\n\n Returns:\n A specific secret object.\n \"\"\"\n secret = zen_store().get_secret(secret_id=secret_id)\n\n # Remove secrets from the response if the user does not have write\n # permissions.\n if PermissionType.WRITE not in auth_context.permissions:\n secret.remove_secrets()\n\n return secret\n\n\n@router.put(\n \"/{secret_id}\",\n response_model=SecretResponseModel,\n responses={401: error_response, 404: error_response, 422: error_response},\n)\n@handle_exceptions\ndef update_secret(\n secret_id: UUID,\n secret_update: SecretUpdateModel,\n patch_values: Optional[bool] = False,\n _: AuthContext = Security(authorize, scopes=[PermissionType.WRITE]),\n) -> SecretResponseModel:\n \"\"\"Updates the attribute on a specific secret using its unique id.\n\n Args:\n secret_id: ID of the secret to get.\n secret_update: the model containing the attributes 
to update.\n patch_values: Whether to patch the secret values or replace them.\n\n Returns:\n The updated secret object.\n \"\"\"\n if not patch_values:\n # If patch_values is False, interpret the update values as a complete\n # replacement of the existing secret values. The only adjustment we\n # need to make is to set the value of any keys that are not present in\n # the update to None, so that they are deleted.\n secret = zen_store().get_secret(secret_id=secret_id)\n for key in secret.values.keys():\n if key not in secret_update.values:\n secret_update.values[key] = None\n\n return zen_store().update_secret(\n secret_id=secret_id, secret_update=secret_update\n )\n\n\n@router.delete(\n \"/{secret_id}\",\n responses={401: error_response, 404: error_response, 422: error_response},\n)\n@handle_exceptions\ndef delete_secret(\n secret_id: UUID,\n _: AuthContext = Security(authorize, scopes=[PermissionType.WRITE]),\n) -> None:\n \"\"\"Deletes a specific secret using its unique id.\n\n Args:\n secret_id: ID of the secret to delete.\n \"\"\"\n zen_store().delete_secret(secret_id=secret_id)\n","sub_path":"src/zenml/zen_server/routers/secrets_endpoints.py","file_name":"secrets_endpoints.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197356611","text":"'''\nCreated on Sep 22, 2015\n\n@author: ljiang\n'''\nimport pytest\n\nfrom all_uniq_1_1 import *\n\n@pytest.mark.parametrize(\"input, output, func\", [('abcabc', False, 1), ('a', True, 1), ('123', True, 1), ('', True, 1), ('a186 ', False, 1),\n ('abcabc', False, 2), ('a', True, 2), ('123', True, 2), ('', True, 2), ('a186 ', False, 2)])\ndef test_all_uniq(input, output, func):\n if func == 1:\n assert uniq_or_not(input) == output\n if func == 2:\n assert uniq_or_not_2(input) == output\n","sub_path":"CareerCup/test_all_uniq_1_1.py","file_name":"test_all_uniq_1_1.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271033192","text":"#! 
python38\n# coding=utf-8\nimport uiautomator2 as u2\nimport time\nfrom time import sleep\nimport logging\nimport sys\nimport os\nimport platform\nimport datetime\nimport threading\nimport serial\nimport serial.tools.list_ports\n\nexit_all = False\ndef usage():\n    print(\"Usage: optcode test_count delay_time\")\n    print(\" optcode: 0 - power off, no need for test_count and delay_time\")\n    print(\" 1 - power on, no need for test_count and delay_time\")\n    print(\" 2 - reset x9_evb board and check if android boots successfully\")\n    \ndef get_os_type():\n    #get the OS type, Linux or Windows\n    return platform.system()\n\ndef serial_opt(ser,opt, delay):\n    opt_v=[]\n    onrelay=(0xAA,0x00)\n    offrelay=(0xBB,0x00)\n    #onrelay=(0xaa,0)\n    #offrelay=(0xbb, 0) \n    if opt==0:\n        opt_v=offrelay\n        print(\"Powering off \", end=\"\")\n    else:\n        opt_v=onrelay\n        print(\"Powering on \",end=\"\")\n\n    result=ser.write(opt_v) \n    if result != 2:\n        print(\"failed \"+str(result))\n    else:\n        print(\"succeeded\")\n    time.sleep(delay)\n    \ndef init_power_serial():\n    DEFAULT_COM_DES = \"USB-SERIAL CH340\" \n    portx=\"\" \n    bps=9600\n    timex=5\n\n    plist = list(serial.tools.list_ports.comports())\n    #print(\"plist:\"+str(len(plist)))\n\n    for port in plist: \n        #if port.description.find(DEFAULT_COM_DES) >= 0:\n        if port.vid == 0x1a86 and port.pid == 0x7523:\n            portx = port.name\n            print(port.description)\n    \n    ser=serial.Serial(portx, bps, timeout=timex) \n    print(\"\\nport:\"+str(portx)+\" baud:\"+str(bps))\n    return ser\n\ndef deinit_power_serial(ser):\n    ser.close()#close the serial port\n    \ndef deinit_log_serial(ap_ser):\n    ap_ser.close()#close the serial port\n\ndef get_current_time():\n    curr_time = datetime.datetime.now() \n    file_time = curr_time.strftime(\"_%m%d%H%M\") #keep only month/day/hour/minute\n    return file_time\n    \ndef save_fail_log():\n    log_time = get_current_time()\n    adb_root = 'adb root'\n    adb_remount = 'adb remount'\n    log_path = \"D:\\\\Android_log\" + log_time\n    rm_log_cmd = 'adb shell rm -rf /data/sdrv_logs'\n    if os.path.exists(log_path) == False:\n        os.mkdir(log_path)\n    adb_pull = 'adb pull /data/sdrv_logs ' + log_path\n    os.popen(adb_root) \n    time.sleep(3)\n    os.popen(adb_remount) \n    time.sleep(3)\n    ret_pull = os.popen(adb_pull) \n    ret_str = ret_pull.read()\n    if ret_str.find(\"error\") != -1:\n        print(\"adb pull /data/sdrv_logs folder FAIL!!!\")\n    time.sleep(1)\n    os.popen(rm_log_cmd) \n    time.sleep(1)\n    \n    \ndef clean_sdrv_logs():\n    adb_root = 'adb root'\n    adb_remount = 'adb remount'\n    rm_log_cmd = 'adb shell rm -rf /data/sdrv_logs'\n    os.popen(adb_root) \n    time.sleep(3)\n    os.popen(adb_remount) \n    time.sleep(3)\n    os.popen(rm_log_cmd) \n    time.sleep(1)\n    \n\ndef get_boot_status(timeout):\n    print(\"Start to check Android boot status\")\n    adb_getprop = 'adb shell getprop sys.boot_completed'\n    for i in range(0, timeout):\n        d = os.popen(adb_getprop)\n        f = d.read()\n        if f.strip() == \"1\":\n            #print(\"getprop sys.boot_completed = \" + f.strip())\n            break\n        print(\"getprop sys.boot_completed = \" + f.strip()) \n        time.sleep(1)\n    print(\"getprop sys.boot_completed = \" + f.strip())\n    if i == (timeout - 1):\n        print(\"getprop sys.boot_completed timeout, Android boot fail\")\n        #save_fail_log()\n        return 0\n    else:\n        print(\"Android boot OK\")\n        clean_sdrv_logs()\n        return 1\n    print(\"check Android boot status finished\")\n    \ndef init_ap_serial():\n    portx=\"\" \n    bps=115200\n    timex=1\n\n    plist = list(serial.tools.list_ports.comports())\n    #print(\"plist:\"+str(len(plist)))\n    port_ap_log=\"\"\n    port_min=\"\"\n    log_port_list=[]\n    for port in plist: \n        #if port.description.find(DEFAULT_COM_DES) >= 0:\n        if port.vid == 0x0403 and port.pid == 0x6011:\n            log_port_list.append(port)\n    \n    log_port_list.sort()\n    port_ap_log = log_port_list[2].name\n    
ap_ser=serial.Serial(port_ap_log,bps,timeout=timex) \n    print(\"\\nap core debug com:\"+str(port_ap_log))\n    \n    return ap_ser\n    \ndef read_log(ser, tofile, filename):\n    global exit_all\n    f = None\n    if tofile == True:\n        f = open(filename, 'w',encoding='utf-8')\n\n    while True:\n        if exit_all == True:\n            print(\"read_log will exit\")\n            break\n        #str = ser.readline().decode(encoding='UTF-8',errors='ignore')\n        str = (ser.readline().decode(encoding='UTF-8')).strip(\"\\n\")\n        if len(str) > 0 and f is not None:\n            f.write(str)\n            #print(str) \n    if tofile == True:\n        f.close()\n    \ndef save_ap_log():\n    print(\"Start to save ap log\")\n    ap_ser = init_ap_serial() \n    log_time = get_current_time()\n    save_log_path = \"D:\\\\ap_log\" + log_time + \".txt\"\n    thread_ap = threading.Thread(target=read_log, name=\"ap-core\", args=(ap_ser, True, save_log_path))\n    thread_ap.start()\n    return ap_ser \n\ndef start_android_autolog():\n    print(\"start to save android log automatically\")\n    adb_root = 'adb root'\n    adb_remount = 'adb remount'\n    adb_autolog = 'adb shell setprop persist.log.start 1'\n    adb_rm_log = 'adb shell rm -rf /data/sdrv_logs'\n    serial_opt(ser,1,30) #power on for 30s so adb becomes available\n    os.popen(adb_root) \n    time.sleep(1)\n    os.popen(adb_remount)\n    time.sleep(1)\n    os.popen(adb_autolog)\n    time.sleep(1)\n    serial_opt(ser,0,5) #power off to reboot\n    \n    serial_opt(ser,1,30) #power on for 30s so adb becomes available\n    os.popen(adb_root) \n    time.sleep(1)\n    os.popen(adb_remount)\n    time.sleep(1)\n    os.popen(adb_rm_log) #the sdrv_logs folder must be deleted before the test\n    time.sleep(1)\n    serial_opt(ser,0,5)\n    print(\"setting android automatic log finished\")\n    \n\ndef stop_android_autolog():\n    adb_root = 'adb root'\n    adb_remount = 'adb remount'\n    adb_autolog_stop = 'adb shell setprop persist.log.start 0' \n    os.popen(adb_root) \n    time.sleep(3)\n    os.popen(adb_remount)\n    time.sleep(3)\n    os.popen(adb_autolog_stop)\n\n\n    \ndef reset_x9_evb_boot(ser,count):\n    global exit_all\n    timeout = 100\n    serial_opt(ser,0,5)\n    ap_ser = save_ap_log() #keep saving the AP serial log for the whole test\n    start_android_autolog()\n    for num in range(0,count):\n        print(\"\\nTest #\"+str(num+1)+\" started:\\n\")\n        serial_opt(ser,1,5)\n        ret = get_boot_status(timeout) \n        if ret == 0:\n            break\n        time.sleep(3) \n        serial_opt(ser,0,5) \n        print(\"\\nTest #\"+str(num+1)+\" finished\\n\")\n    \n    exit_all = True\n    deinit_log_serial(ap_ser)\n    stop_android_autolog()\n\n    \nif __name__==\"__main__\":\n    if len(sys.argv) == 1:\n        usage();\n        exit(); \n    \n    test_case = sys.argv[1]\n    count = 1\n    delay = 20\n    if len(sys.argv) >=3:\n        count = int(sys.argv[2])\n    \n    if len(sys.argv) >= 4:\n        delay = int(sys.argv[3])\n    \n    ser = init_power_serial()\n    if test_case == \"0\":\n        serial_opt(ser, 0,0)\n    elif test_case == \"1\":\n        serial_opt(ser, 1,0)\n    elif test_case == \"2\":\n        reset_x9_evb_boot(ser,count)\n    else:\n        usage()\n    \n    deinit_power_serial(ser)\n    os._exit(0)\n\n\n\n\n","sub_path":"test_boot_jidianqi_stress.py","file_name":"test_boot_jidianqi_stress.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131940003","text":"\"\"\"\nQuestion:\n\nGiven a string, find out if any of its permutations is a palindrome.\n\"\"\"\nfrom unittest import TestCase\n\n\ndef is_palindrome(input):\n    \"\"\"\n\n    :param input:\n    :return:\n\n    Thoughts:\n    - The brute force way is to find all permutations of the string, and check if any of it is a palindrome.\n      Complexity for calculating all permutations is len(input) factorial. 
That is O(n!), far worse than O(n^2).\n    - To check if a string is a palindrome, it's O(n).\n\n    But we can combine these two requirements\n    - if the len(input) is even, that means every char needs to have a pair\n    - if the length is odd, at most one char can be pair-less.\n\n    The trick here is to recognize that sometimes multiple requirements of a problem can make the problem\n    easier to solve than if only given individual requirements.\n\n    More thoughts\n    - sometimes it saves time to list out all test cases on paper first, and then test an approach to see\n    if it would work for all the test cases\n    \"\"\"\n\n    # space: O(n)\n    input_map = {}\n\n    # time: O(n)\n    for char in input:\n        if char in input_map:\n            input_map[char] += 1\n        else:\n            input_map[char] = 1\n\n    # look through input_map to make sure at most one entry has an odd count\n\n    # space: O(1)\n    num_odd = 0\n\n    # time: O(n)\n    for count in input_map.values():\n        if count % 2 != 0:\n            num_odd += 1\n\n    # total space: O(n), total time: O(n)\n    return num_odd <= 1\n\n\nclass TestIsPalindrome(TestCase):\n    def test_is_palindrome(self):\n        self.assertTrue(is_palindrome('civic'))\n        self.assertTrue(is_palindrome('ivicc'))\n        self.assertFalse(is_palindrome('civil'))\n        self.assertFalse(is_palindrome('livci'))\n        # all even\n        self.assertTrue(is_palindrome('aabbcc'))\n        # one odd\n        self.assertTrue(is_palindrome('aabbccc'))\n        # two odd\n        self.assertFalse(is_palindrome('aabbcccd'))\n","sub_path":"python/cake/30_palindrome.py","file_name":"30_palindrome.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"54662750","text":"##########################################################################\n# MediPy - Copyright (C) Universite de Strasbourg\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. 
Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\nimport numpy\nfrom vtk import vtkActor, vtkContourFilter, vtkPolyDataMapper\n\nfrom layer import Layer\n\nclass ContourLayer(Layer):\n    \"Layer showing its data as a set of isocontours.\"\n    \n    @staticmethod\n    def can_create(image):\n        return (image.data_type == \"scalar\" and\n                image.image_type == \"spectroscopy\" and\n                image.ndim <= 3)\n    \n    def __init__(self, world_to_slice, image, display_coordinates=\"physical\",\n                 colormap=None, opacity = 1.0, levels=None) :\n        \n        ############################\n        # Property-related members #\n        ############################\n        \n        # List of levels, or None to use default-generated values\n        self._levels = None \n        \n        self._actor = vtkActor()\n        \n        ###################\n        # Private members #\n        ###################\n        self._contour_filter = vtkContourFilter()\n        self._mapper = vtkPolyDataMapper()\n        \n        ##################\n        # Initialization #\n        ##################\n        \n        super(ContourLayer, self).__init__(\n            world_to_slice, image, display_coordinates, colormap, opacity)\n        \n        self._contour_filter.SetInput(self._change_information.GetOutput())\n        self._contour_filter.UseScalarTreeOn()\n        self._mapper.SetInputConnection(self._contour_filter.GetOutputPort())\n        self._mapper.ScalarVisibilityOn()\n        self._mapper.SetLookupTable(self._colormap.vtk_colormap)\n        self._mapper.UseLookupTableScalarRangeOn()\n        self._actor.SetMapper(self._mapper)\n        \n        if levels is None :\n            # Generate 20 evenly spaced contour values\n            levels = numpy.linspace(self._image.data.min(), \n                                    self._image.data.max(), 20)\n        self._set_levels(levels)\n        \n    ##############\n    # Properties #\n    ##############\n    \n    def _set_image(self, image):\n        super(ContourLayer, self)._set_image(image)\n        self._mapper.SetScalarRange(image.data.min(), image.data.max())\n    \n    def _set_colormap(self, colormap):\n        super(ContourLayer, self)._set_colormap(colormap)\n        self._mapper.SetLookupTable(self._colormap.vtk_colormap)\n        self.colormap.add_observer(\"vtk_colormap\", self._on_vtk_colormap)\n    \n    def _set_opacity(self, opacity):\n        super(ContourLayer, self)._set_opacity(opacity)\n        self._actor.GetProperty().SetOpacity(opacity)\n    \n    def _get_levels(self) :\n        \"Levels at which contours are computed.\"\n        return self._levels\n    \n    def _set_levels(self, levels):\n        self._levels = levels\n        for i, level in enumerate(self._levels) :\n            self._contour_filter.SetValue(i, level)\n    \n    def _get_actor(self):\n        return self._actor\n\n    levels = property(_get_levels, _set_levels)\n    \n    #####################\n    # Private interface #\n    #####################\n    \n    def _on_vtk_colormap(self, dummy) :\n        \"\"\" Event handler called when the ID of colormap.vtk_colormap changes.\n        \"\"\"\n        \n        self._set_colormap(self._colormap)\n\nLayer.derived_classes.append(ContourLayer)\n","sub_path":"lib/medipy/gui/image/contour_layer.py","file_name":"contour_layer.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"558577901","text":"# python3\n\n# Modify the given implementation of the QuickSort algorithm \n# so that it works fast even on sequences containing many \n# identical elements.\n\n\nfrom random import randint\n\n\ndef partition(array, left, right):\n    x = array[left]\n    start = left\n    finish = left\n    for j in range(left + 1, right + 1):\n        # move the pivot x in front of any value that is less than it\n        if 
array[j] < x:\n y = array[j]\n array[j] = array[finish + 1]\n array[finish + 1] = array[start]\n array[start] = y\n start += 1\n finish += 1\n # if jth value is x then move it to a position next to x\n # and increment the finish index\n elif array[j] == x:\n array[finish + 1], array[j] = array[j], array[finish + 1]\n finish += 1\n\n return start, finish\n\n\ndef randomized_quick_sort(array, left, right):\n if left < right:\n k = randint(left, right)\n array[left], array[k] = array[k], array[left]\n start, finish = partition(array, left, right)\n randomized_quick_sort(array, left, start - 1)\n randomized_quick_sort(array, finish + 1, right)\n\n\ndef main():\n input_n = int(input())\n elements = list(map(int, input().split()))\n randomized_quick_sort(elements, 0, len(elements) - 1)\n print(input_n)\n print(*elements)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"modify_quicksort/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"458964319","text":"''' A custom Importer making use of the import hook capability\n\nhttps://www.python.org/dev/peps/pep-0302/\n\nIts purpose is to convert would-be Python module that use non-standard\nsyntax into a correct form prior to importing them.\n'''\n\n# imp is deprecated but I wasn't (yet) able to figure out how to use\n# its replacement, importlib, to accomplish all that is needed here.\nimport imp\nimport re\nimport sys\n\nMAIN = False\nfrom_nonstandard = re.compile(\"(^from\\s+__nonstandard__\\s+import\\s+)\")\n\n\nclass ExperimentalImporter(object):\n '''According to PEP 302, an importer only needs two methods:\n find_module and load_module.\n '''\n\n def find_module(self, name, path=None):\n '''We don't need anything special here, so we just use the standard\n module finder which, if successful,\n returns a 3-element tuple (file, pathname, description).\n See https://docs.python.org/3/library/imp.html for details\n '''\n self.module_info = imp.find_module(name)\n return self\n\n def load_module(self, name):\n '''Load a module, given information returned by find_module().\n '''\n\n # According to PEP 302, the following is required\n # if reload() is to work properly\n if name in sys.modules:\n return sys.modules[name]\n\n path = self.module_info[1] # see find_module docstring above\n module = None\n\n if path is not None: # path=None is the case for some stdlib modules\n with open(path) as source_file:\n module = self.convert_experimental(name, source_file.read())\n\n if module is None:\n module = imp.load_module(name, *self.module_info)\n return module\n\n def convert_experimental(self, name, source):\n '''Used to convert the source code, and create a new module\n if one of the lines is of the form\n\n ^from __nonstandard__ import converter1 [, converter2, ...]\n\n (where ^ indicates the beginning of a line)\n otherwise returns None and lets the normal import take place.\n Note that this special code must be all on one physical line --\n no continuation allowed by using parentheses or the\n special \\ end of line character.\n\n \"converters\" are modules which must contain a function\n\n transform_source_code(source)\n\n which returns a tranformed source.\n '''\n global MAIN\n lines = source.split('\\n')\n\n for linenumber, line in enumerate(lines):\n if from_nonstandard.match(line):\n break\n else:\n return None # normal importer will handle this\n\n # we started with: \"from __nonstandard__ import converter1 
[,...]\"\n line = from_nonstandard.sub(' ', line)\n # we now have: \"converter1 [,...]\"\n line = line.split(\"#\")[0] # remove any end of line comments\n converters = line.replace(' ', '').split(',')\n # and now: [\"converter1\", ...]\n\n # drop the \"fake\" import from the source code\n del lines[linenumber]\n source = '\\n'.join(lines)\n\n for converter in converters:\n mod_name = __import__(converter)\n source = mod_name.transform_source_code(source)\n\n module = imp.new_module(name)\n # From PEP 302: Note that the module object must be in sys.modules\n # before the loader executes the module code.\n # This is crucial because the module code may\n # (directly or indirectly) import itself;\n # adding it to sys.modules beforehand prevents unbounded\n # recursion in the worst case and multiple loading in the best.\n sys.modules[name] = module\n\n if MAIN: # see below\n module.__name__ = \"__main__\"\n MAIN = False\n exec(source, module.__dict__)\n\n return module\n\n\nsys.meta_path = [ExperimentalImporter()]\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n # this program was started by\n # $ python import_experimental.py some_script\n # and we will want some_script.__name__ == \"__main__\"\n MAIN = True\n __import__(sys.argv[1])\n","sub_path":"version5/import_nonstandard.py","file_name":"import_nonstandard.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"618373576","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nimport base64\nimport binascii\nimport hashlib\nimport json\nimport time\n\nfrom azure_devtools.scenario_tests import RecordingProcessor\nimport six\n\n\nSECRET_FIELDS = frozenset(\n {\n \"access_token\",\n \"client_secret\",\n \"code\",\n \"device_code\",\n \"message\",\n \"password\",\n \"refresh_token\",\n \"user_code\",\n }\n)\n\n# managed identity headers are not dangerous to record but redacting them prevents anyone worrying whether they are\nSECRET_HEADERS = frozenset(\n {\n \"secret\",\n \"X-IDENTITY-SECRET\",\n }\n)\n\n\nclass RecordingRedactor(RecordingProcessor):\n \"\"\"Removes authentication secrets from recordings.\n\n :keyword bool record_unique_values: Defaults to False. Set True for tests requiring unique, consistent fake values.\n \"\"\"\n\n def __init__(self, record_unique_values=False):\n super(RecordingRedactor, self).__init__()\n self._record_unique_values = record_unique_values\n\n def process_request(self, request):\n # bodies typically contain secrets and are often formed by msal anyway, i.e. 
not this library's responsibility\n request.body = None\n\n for header in SECRET_HEADERS:\n if header in request.headers:\n fake_value = self._get_fake_value(request.headers[header])\n request.headers[header] = fake_value\n\n return request\n\n def process_response(self, response):\n try:\n body = json.loads(response[\"body\"][\"string\"])\n except (KeyError, ValueError):\n return response\n\n for field in body:\n if field in SECRET_FIELDS:\n fake_value = self._get_fake_value(body[field])\n body[field] = fake_value\n\n response[\"body\"][\"string\"] = json.dumps(body)\n return response\n\n def _get_fake_value(self, real_value):\n redacted_value = \"redacted\"\n if self._record_unique_values:\n digest = hashlib.sha256(six.ensure_binary(real_value)).digest()\n redacted_value += six.ensure_str(binascii.hexlify(digest))[:6]\n return redacted_value\n\n\nclass IdTokenProcessor(RecordingProcessor):\n def process_response(self, response):\n \"\"\"Changes the \"exp\" claim of recorded id tokens to be in the future during playback\n\n This is necessary because msal always validates id tokens, raising an exception when they've expired.\n \"\"\"\n try:\n # decode the recorded token\n body = json.loads(six.ensure_str(response[\"body\"][\"string\"]))\n header, encoded_payload, signed = body[\"id_token\"].split(\".\")\n decoded_payload = base64.b64decode(encoded_payload + \"=\" * (4 - len(encoded_payload) % 4))\n\n # set the token's expiry time to one hour from now\n payload = json.loads(six.ensure_str(decoded_payload))\n payload[\"exp\"] = int(time.time()) + 3600\n\n # write the modified token to the response body\n new_payload = six.ensure_binary(json.dumps(payload))\n body[\"id_token\"] = \".\".join((header, base64.b64encode(new_payload).decode(\"utf-8\"), signed))\n response[\"body\"][\"string\"] = six.ensure_binary(json.dumps(body))\n except KeyError:\n pass\n\n return response\n","sub_path":"sdk/identity/azure-identity/tests/recording_processors.py","file_name":"recording_processors.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"133613809","text":"import sys\nimport logging\nimport subprocess\nimport pyshark\nfrom datetime import datetime\nfrom pygal import XY\nfrom pygal.style import LightSolarizedStyle\nlogging.getLogger().setLevel(logging.DEBUG)\nlogger=logging.getLogger(__name__)\n\ndef ARP_Request(filename):\n #to filter and extract all ARP request packets from .pcap file which was given as input.\n # tcpdump command that can be used directly in terminal to perform this operation=>\n # sudo tcpdump -r filename.pcap arp[7]=1 -n -vvv\n # '-r' indicates to apply the mentioned filter to .pcap file adjacent to it.\n # '-n' indicates that name resolution for IP addresses is not needed.\n # '7' is the offset of 1byte(out of 2 bytes) which contains the opcode of the corresponding arp packet.\n # '1' is the opcode that indicates the the corresponding packet is an arp_request packet.\n # So collectively 'arp[1]' indicates tcpdump to filter arp_request packets.\n logger.info('Filtering arp_request packets')\n out=subprocess.Popen(['tshark','-r',filename ,'arp[7]==1'],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n stdout,stderr=out.communicate()\n stdout=stdout.decode('utf-8')\n stdout=stdout.replace('=','')\n print(stdout)\n return stdout\ndef ARP_Response(filename):\n #to filter and extract all ARP response packets from .pcap file which was given as input.\n # tcpdump command that can be used 
directly in terminal to perform this operation=>\n    # sudo tcpdump -r filename.pcap arp[7]=1 -n -vvv\n    # '-r' indicates to apply the mentioned filter to .pcap file adjacent to it.\n    # '-n' indicates that name resolution for IP addresses is not needed.\n    # '7' is the offset of 1byte(out of 2 bytes) which contains the opcode of the corresponding arp packet.\n    # '1' is the opcode that indicates that the corresponding packet is an arp_request packet.\n    # So collectively 'arp[7]==1' indicates tcpdump to filter arp_request packets.\n    logger.info('Filtering arp_request packets')\n    out=subprocess.Popen(['tshark','-r',filename ,'arp[7]==1'],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.STDOUT)\n    stdout,stderr=out.communicate()\n    stdout=stdout.decode('utf-8')\n    stdout=stdout.replace('=','')\n    print(stdout)\n    return stdout\ndef ARP_Response(filename):\n    #to filter and extract all ARP response packets from .pcap file which was given as input.\n    # tcpdump command that can be used directly in terminal to perform this operation=>\n    # sudo tcpdump -r filename.pcap arp[7]=2 -n -vvv\n    # '-r' indicates to apply the mentioned filter to .pcap file adjacent to it.\n    # '-n' indicates that name resolution for IP addresses is not needed.\n    # '7' is the offset of 1byte(out of 2 bytes) which contains the opcode of the corresponding arp packet.\n    # '2' is the opcode that indicates that the corresponding packet is an arp_response packet.\n    # So collectively 'arp[7]==2' indicates tcpdump to filter arp_response packets.\n    logger.info('Filtering arp_response packets')\n    out=subprocess.Popen(['tshark','-r',filename ,'arp[7]==2'],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.STDOUT)\n    stdout,stderr=out.communicate()\n    # decode the raw bytes once, then iterate over text lines (iterating over\n    # a bytes object would print integer byte values, not packets)\n    stdout=stdout.decode('utf-8')\n    print(\"Displaying packets\")\n    for packet in stdout.splitlines():\n        print(packet)\n    stdout=stdout.replace('=','')\n    print(stdout)\n    return stdout\ndef main():\n    if len(sys.argv)!=2:\n        logger.error('Insufficient number of arguments')\n        print(\"Usage: <program_name>.py <capture_file>.pcap\")\n        sys.exit(1)\n\n    filename=sys.argv[1]\n    req_op=ARP_Request(filename)\n    res_op=ARP_Response(filename)\n    htmlFile=open(\"ARP_analysis_result_tshark.html\",'w')\n    htmlFile.write('<html>\n<body>')\n
    htmlFile.write('\n<h2>ARP_REQUEST_PACKETS</h2>\n<pre>\n')\n    htmlFile.write(req_op)\n    htmlFile.write('\n</pre>\n<h2>ARP_RESPONSE_PACKETS</h2>\n<pre>\n')\n
    htmlFile.write(res_op)\n    htmlFile.write('\n</pre>\n</body>\n</html>')\nif __name__==\"__main__\":\n    main()","sub_path":"Cisco_ARP_project/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201232174","text":"import win32gui\nfrom PIL import ImageGrab\n \n# Grab a screenshot of a specific window\n\n# List of windows\nwindowsList = []\n\n# Callback\ndef enumWindowsCallBack(hwnd, pid):\n    windowsList.append((hwnd, win32gui.GetWindowText(hwnd))) # append the window to our list\n\n# Actually take the screenshot itself\ndef getScreenImage(gameName):\n    win32gui.EnumWindows(enumWindowsCallBack, None)\n\n    gameNameProcess = [(hwnd, title) for hwnd, title in windowsList if f'{gameName}' in title.lower()]\n    \n    # Get the hwnd of the first window found whose title contains %gameName%\n    gameNameProcess = gameNameProcess[0]\n    hwnd = gameNameProcess[0]\n\n    win32gui.SetForegroundWindow(hwnd) # activate the window\n    bbox = win32gui.GetWindowRect(hwnd) # get the window geometry\n    img = ImageGrab.grab(bbox) # grab the image\n    return img\n","sub_path":"teach/collectRawData.py","file_name":"collectRawData.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"202032671","text":"import librosa\nimport numpy as np\nimport xlsxwriter as xl\n\nworkbook = xl.Workbook('data.xlsx')\nworksheet = workbook.add_worksheet()\nworksheet.write('A1','Frequency')\nworksheet.write('B1','Amplitude')\nworksheet.write('C1','Phase')\nworksheet.write('D1','Tempo')\nworksheet.write('E1','Label')\n\ndef removeNans(np_arr):\n    ret=[]\n    for i in np_arr:\n        l=[]\n        for j in i:\n            if not np.isnan(j):\n                l.append(j)\n        ret.append(l)\n    return np.array(ret)\n\nFreq=0\nAmps=0\nPhases=0\nPitch=0\n\ndef cal(filename):\n    global Freq,Amps,Phases,Pitch\n    y, sr = librosa.load(filename)\n    x, xr = librosa.load(filename)\n    y=librosa.stft(y)\n    freqs = np.abs(y)\n    phases = np.angle(y)\n    amps = librosa.power_to_db(freqs**2, ref=np.max)\n    amps=removeNans(amps)\n    freqs=removeNans(freqs)\n    phases=removeNans(phases)\n    ffsum=0\n    fasum=0\n    fpsum=0\n\n    for bins in freqs:\n        fsums=0\n        fmin=np.min(bins)\n        fran=np.ptp(bins)\n        for i in bins:\n            if fran != 0:\n                i=(i-fmin)/fran\n            else:\n                i=1\n            fsums=fsums+i\n        ffsum=ffsum+fsums\n    Freq=ffsum/len(freqs)\n\n\n    for bins in amps:\n        asums=0\n        amin=np.min(bins)\n        aran=np.ptp(bins)\n        for i in bins:\n            if aran != 0:\n                i=(i-amin)/aran\n            else:\n                i=1\n            asums=asums+i\n        fasum=fasum+asums\n    Amps=fasum/len(amps)\n\n    for bins in phases:\n        psums=0\n        pmin=np.min(bins)\n        pran=np.ptp(bins)\n        for i in bins:\n            if pran != 0:\n                i=(i-pmin)/pran\n            else:\n                i=1\n            psums=psums+i\n        fpsum=fpsum+psums\n    Phases=fpsum/len(phases)\n\n    Pitch=librosa.beat.tempo(y=x,sr=xr)\n\nrow=116\ncol=0\n\nfor item in range(1,19):\n    print(\"Writing to workbook...\")\n    cal('Nots//not'+str(item)+'.wav')\n    vals=[Freq,Amps,Phases,Pitch[0],0]\n    print(vals)\n    for stuff in vals:\n        worksheet.write(row,col,stuff)\n        col+=1\n    row+=1\n    col=0\n    print(\"Inserted \",row)\n\nfor item in range(7,27):\n    print(\"Writing to workbook...\")\n    cal('Knocks//Knock'+str(item)+'.wav')\n    vals=[Freq,Amps,Phases,Pitch[0],1]\n    print(vals)\n    for stuff in vals:\n        worksheet.write(row,col,stuff)\n        col+=1\n    row+=1\n    col=0\n    print(\"Inserted 
\",row)\n\nworkbook.close()\n","sub_path":"source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"240911256","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport subprocess\nimport pandas as pd\nfrom pandas import notnull, isnull\nfrom numpy import log10, append, nan\nimport numpy as np\nimport re\nimport json, requests, asr\nimport urllib.request\nimport urllib.parse\nimport sys\nimport csv\n\n#Personal libraries\nimport helper_functions\nfrom helper_functions import *\nfrom callbacks import *\nfrom gene_plotter import *\n\ngene=sys.argv[1]\ninput_monster=sys.argv[2]\noutput_monster=sys.argv[3]\nsp_results=sys.argv[4]\nvcf=sys.argv[5]\nwindow=sys.argv[6]\noutput=sys.argv[7]\nchop=sys.argv[8]\n\n# global variables\nserver = \"https://rest.ensembl.org\";\nhelper_functions.server=server;\n\n# Getting Gene coordinates and extend the coordinates with the specified window:\ninfo(\"Querying Ensembl for gene coordinates...\")\ngc=get_coordinates(gene);\ngc.extend(int(window))\n\n# Extract coordinates:\n\nc=gc.chrom\nstart = gc.start\nend = gc.end\ngene_start=gc.gstart\ngene_end=gc.gend\n\n# Report coordinates:\ninfo(\"\\t\\t⇰ Ensembl provided the coordinates \"+str(c)+\":\"+str(gene_start)+\"-\"+str(gene_end)+\" for gene \"+gene)\ninfo(\"\\t\\t⇰ Plot boundaries: \"+ str(c)+\":\"+str(start)+\"-\"+str(end))\n\n## Getting variant consequences for all variants in the region\ninfo(\"Querying Ensembl for SNP consequences and phenotype associations.\")\nresp=get_rsid_in_region(gc)\n#resp.to_csv(gene+\".snp.data\", index=None, sep=\",\", quoting=csv.QUOTE_NONNUMERIC);\n#resp=pd.read_table(\"snp.data\", sep=\",\")\nresp['pheno'].replace(to_replace=\"Annotated by HGMD but.*available\", value=\"\", inplace=True, regex=True)\nresp['pheno'].replace(to_replace=\"ClinVar.*not specified\", value=\"\", inplace=True, regex=True)\nresp.loc[isnull(resp.pheno), 'pheno']=\"none\"\nresp.pheno=resp.pheno.str.strip()\nresp.loc[resp.pheno==\"\", 'pheno']=\"none\"\nresp['ensembl_rs']=resp['rs']\nresp.drop('rs', axis=1, inplace=True)\ninfo(\"\\t\\t⇰ Ensembl provided\", len(resp),\"known SNPs, \", len(resp[resp.pheno!=\"none\"]), \"have associated phenotypes.\")\n\n\n\n\n\n## Get the single point results\nsp = fetch_single_point(gc, sp_results)\ninfo(\"Read\", len(sp), \"lines from single-point analysis.\");\nsp=pd.merge(sp, resp, on='ps', how='outer')\n#rs_y is the ensembl rsid\nsp.loc[isnull(sp.ensembl_rs), 'ensembl_rs']=\"novel\"\nsp.loc[isnull(sp.consequence), 'consequence']=\"novel\"\nsp=sp[notnull(sp.chr)]\nsp['ensembl_consequence']=sp['consequence']\nsp['chr']=sp['chr'].astype(int)\nsp=get_csq_novel_variants(sp, 'chr', 'ps', 'allele0', 'allele1')\n## Get the burden p from MONSTER output\nresults=pd.read_table(output_monster);\nburdenp=results.p_MONSTER[0];\nlogburdenp=-log10(burdenp);\ninfo(\"Burden p-value is\", burdenp, \"(\", logburdenp, \").\");\n\n\n\n## Get the weights and variants in burden\nvariants=read_variants_from_gene_set(gc, input_monster)\ninfo(\"Read \", variants.count()[0], \"variants in burden\")\nrawdat=pd.merge(sp, variants, on='ps', how='outer')\n\nif rawdat[rawdat.chr.isnull()].ps.size > 0 :\n\twarn(str(rawdat[rawdat.chr.isnull()].ps.size)+\" variants were not found in the single point. 
They will be removed, but this is not a normal thing, please check your results.\")\nrawdat.dropna(subset=['chr'], inplace=True)\n\n\n\n## Calculate LD\ninfo(\"Calculating LD...\")\ninfo(\"getld.sh\", vcf, \"chr\"+str(c)+\":\"+str(start)+\"-\"+str(end), str(sp.size), str(end-start))\ntask = subprocess.Popen([os.path.dirname(sys.argv[0])+\"/getld.sh\", vcf, \"chr\"+str(c)+\":\"+str(start)+\"-\"+str(end), str(sp.size), str(end-start)], stdout=subprocess.PIPE);\nld=pd.read_table(task.stdout, sep='\\s+');\nos.remove(\"plink.log\")\nos.remove(\"plink.nosex\")\n\n\n\n## Defining plot-specific data\ninfo(\"Defining plot-specific data...\")\nrawdat['radii']=3\ndenom=rawdat.weight[rawdat.weight.notnull()]\nif len(denom):\n\tdenom=max(denom)\nelse:\n\tdenom=1\nrawdat.loc[rawdat.weight.notnull(), 'radii']=3+20*rawdat.weight[rawdat.weight.notnull()]/denom\nrawdat['alpha']=0\nrawdat.loc[rawdat.weight.notnull(), 'alpha']=0.8\nrawdat['alpha_prevsig']=0\nrawdat.loc[(rawdat.pheno!=\"none\") & (rawdat.alpha>0), 'alpha_prevsig']=1\nfrom bokeh.palettes import PuOr8 as palette\nfrom bokeh.palettes import Viridis8 as palWeight\n# Spectral9 Palette : ['#3288bd', '#66c2a5', '#abdda4', '#e6f598', '#ffffbf', '#fee08b', '#fdae61', '#f46d43', '#d53e4f']\npalWeight.append(\"#939393\")\nrawdat['maf']=[af if af<0.5 else 1-af for af in rawdat.af]\nrawdat['mafcolor']=[palette[i] for i in pd.cut(rawdat.maf, [-1, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.6]).cat.codes]\nrawdat['color']=\"#1F77B4\"\nrawdat['weightcolor']=[palWeight[i] for i in pd.cut(rawdat.weight, 7).cat.codes]\nrawdat['outcolor']=\"#3288bd\"\nrawdat[\"outalpha\"]=0\n\n\n# Plotting\ninfo(\"Loading Bokeh...\")\nfrom bokeh.plotting import figure, output_file, show, save\nfrom bokeh.layouts import layout, widgetbox, row, column\nfrom bokeh.models.widgets import Button, RadioButtonGroup, Div\nfrom bokeh.models import ColumnDataSource, CustomJS, HoverTool, LabelSet, OpenURL, TapTool, Axis, SaveTool\n\noutput_file(output)\np1=figure(width=1500, x_range=[start, end], tools=\"box_zoom,tap,xwheel_zoom,reset,save\", y_range=[-0.5, max(append(-log10(rawdat.p_score), logburdenp))+0.5])\n\nld_source=ColumnDataSource(data=dict(x1=ld.BP_A, x2=ld.BP_B, r2=ld.R2, dp=ld.DP))\nsource = ColumnDataSource(data=dict(ps=rawdat.ps, logsp=-log10(rawdat.p_score), radii=rawdat.radii, alpha=rawdat.alpha, color=rawdat.color, mafcolor=rawdat.mafcolor, weightcolor=rawdat.weightcolor, outcol=rawdat.outcolor, outalpha=rawdat.outalpha, alpha_prevsig=rawdat.alpha_prevsig, snpid=rawdat.rs, rs=rawdat.ensembl_rs, maf=rawdat.maf, csq=rawdat.ensembl_consequence))\n\n## TODO\n## this is the glyph for a third option in the \"current signals\"\n## that should somehow highlight the points. The below draws an underlying asterisk\n## not so good because circle is an actual circle, not a point. 
If this goes forward the points need to be made points.\n## the callbacks also need to be written to update alpha_prevsig in rawdat.\n#p1.asterisk(x='ps', y='logsp', size=20, alpha='alpha_prevsig', color=\"#F0027F\", line_width=2, source=source)\n\np1.circle(x='ps', y='logsp', radius='radii', fill_alpha='alpha', fill_color='color', line_color='outcol', line_alpha='outalpha', line_width=6, radius_units='screen', source=source)\np1.xaxis.visible = False\ngc2=get_coordinates(gene);\n\nif (max(rawdat.loc[rawdat.weight.notnull(), 'ps'])-min(rawdat.loc[rawdat.weight.notnull(), 'ps']) < 500):\n\teseg=gc2.end\n\tsseg=gc2.start\nelse:\n\teseg=max(rawdat.loc[rawdat.weight.notnull(), 'ps'])\n\tsseg=min(rawdat.loc[rawdat.weight.notnull(), 'ps'])\n\nsegsource=ColumnDataSource(data=dict(y0=[logburdenp], y1=[logburdenp], x0=[sseg], x1=[eseg], alpha=[1], color=[\"firebrick\"]))\np1.segment(y0='y0', y1='y1' , x0='x0', x1='x1', color='color', alpha='alpha', source=segsource, line_width=3)\nx0=rawdat.ps[100]\ny0=-log10(rawdat.p_score[100])\nx1=rawdat.ps[400]\ny1=-log10(rawdat.p_score[400])\nbzier=ColumnDataSource(data=dict(x0=[], y0=[], x1=[], y1=[], cx0=[], cy0=[], cx1=[], cy1=[], col=[]))\np1.bezier(x0='x0', y0='y0', x1='x1', y1='y1', cx0='cx0', cy0='cy0', cx1='cx1', cy1='cy1', color='col', line_width=2, source=bzier)\n\nshowhide_sp=CustomJS(args=dict(source=source), code=showhide_sp_code)\n\nchangecolor=CustomJS(args=dict(source=source), code=changecolor_code)\n\nhideburden=CustomJS(args=dict(source=segsource), code=hideburden_code)\n\nld_hover = CustomJS(args=dict(lds=ld_source, rawdat=source), code=ld_hover_code)\n\n\nsignalling=ColumnDataSource(data=dict(way=[0]))\n\nldbz_hover = CustomJS(args=dict(lds=ld_source, rawdat=source, bezier=bzier, signalling=signalling), code=ldbz_hover_code)\n\n\nchangehover = CustomJS(args=dict(signalling=signalling, rawdat=source, bezier=bzier), code=changehover_code)\n\nresp=resp[notnull(resp.pheno)]\nresp=resp[resp.pheno!=\"none\"]\nresp['alpha']=0\nresp['y']=None\nray_source=ColumnDataSource(data=dict(ps=resp.ps, alpha=resp.alpha, y=resp.y, pheno=resp.pheno))\ndisplayhits = CustomJS(args=dict(source=ray_source), code=displayhits_code)\n\n\n\np1.segment(x0='ps', x1='ps', y0=p1.y_range.start, color=\"firebrick\", y1=p1.y_range.end, alpha='alpha', source=ray_source)\ntraits=LabelSet(x='ps', y=p1.y_range.end, y_offset=-0.5, text='pheno', level='glyph', text_alpha='alpha', angle=90, angle_units='deg', text_font_size='10pt', text_align='right', text_font_style='italic', source=ray_source)\np1.add_layout(traits)\n\np1.add_tools(HoverTool(callback=ldbz_hover, tooltips=[(\"SNPid\", \"@snpid\"), (\"RSid\", \"@rs\"), (\"MAF\", \"@maf\"), (\"consequence\", \"@csq\")]))\ntaptool = p1.select(type=TapTool)\ntaptool.callback = OpenURL(url=\"http://www.ensembl.org/Homo_sapiens/Variation/Explore?db=core;v=@rs;vdb=variation\")\n\np_rbg=Div(text=\"\"\"Single-point :\"\"\", width=100)\nrbg = RadioButtonGroup(labels=[\"Hide non-burden\", \"Show all\"], active=0, callback=showhide_sp, name=\"Hello\")\np_chcolor=Div(text=\"\"\"Colouring :\"\"\", width=100)\nchcolor = RadioButtonGroup(labels=[\"None\", \"MAF\", \"Weight\"], active=0, callback=changecolor)\np_burden=Div(text=\"\"\"Show burden :\"\"\", width=100)\nburden = RadioButtonGroup(labels=[\"Yes\", \"No\"], active=0, callback=hideburden)\np_ld=Div(text=\"\"\"LD behaviour :\"\"\", width=100)\ncontrol_ld = RadioButtonGroup(labels=[\"Highlight\", \"Fountain\"], active=0, callback=changehover)\np_signals=Div(text=\"\"\"Show Existing 
associations :\"\"\", width=100)\ncontrol_signals = RadioButtonGroup(labels=[\"No\", \"Rays\"], active=0, callback=displayhits)\n\np1.yaxis[0].major_label_text_font_size = \"13pt\"\np1.yaxis.axis_label = '-log₁₀(p)'\np1.yaxis.axis_label_text_font_size = \"15pt\"\n\n#gc.extend(-int(window)) # <- Not needed anymore. gc object contains the gene start and gene end position: gc.gstart and gc.gend\n\np2=draw_genes(gc, window, width=1500, chop=chop)\np2.x_range=p1.x_range\n#xaxis = p2.select(dict(type=Axis, layout=\"bottom\"))[0]\np2.xaxis[0].formatter.use_scientific = False\np2.xaxis[0].major_label_text_font_size = \"13pt\"\np2.xaxis.axis_label = 'position on chromosome '+str(rawdat.chr[1].astype(np.int64))\np2.xaxis.axis_label_text_font_size = \"15pt\"\n\n\nbbox=column(row([p_rbg, rbg]), row([p_chcolor, chcolor]), row([p_burden, burden]), row([p_ld, control_ld]), row([p_signals, control_signals]))\nl=layout([[p1, bbox], [p2]])\n#p1.output_backend = \"svg\" #NOT FUNCTIONAL\n#p2.output_backend = \"svg\"\nsave(l)\nrawdat.to_csv(output+\".csv\", index=False)\nld.to_csv(output+\".ld.csv\",index=False)\n","sub_path":"plotburden.py","file_name":"plotburden.py","file_ext":"py","file_size_in_byte":10266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"14669754","text":"\"\"\"\n4\nRDD\n4\n[1,2,3,4]\nDD\n1\n[42]\nRRD\n6\n[1,1,2,3,5,8]\nD\n0\n[]\n\n1\nRDD\n4\n[1,2,3,4]\n\n1\nRRR\n0\n[]\n\"\"\"\nimport collections\n\nN = int(input())\nresult = []\n\nfor _ in range(N):\n Command = list(map(str, input()))\n M = int(input())\n\n item_input = input()\n if item_input == '[]':\n Item = []\n else:\n item_input = item_input.replace('[', '').replace(']', '').replace(',', ' ').split(' ')\n Item = collections.deque(list(map(int, item_input)))\n\n is_success, is_reverse = True, False\n for command in Command:\n if command == 'R':\n is_reverse = not is_reverse\n\n elif command == 'D':\n if len(Item) == 0:\n is_success = False\n break\n else:\n if is_reverse:\n Item.pop()\n else:\n Item.popleft()\n # print(command, Item, is_success)\n\n if is_success:\n if is_reverse:\n Item.reverse()\n result.append(Item)\n else:\n result.append(Item)\n else:\n result.append('error')\n\nfor item in result:\n if item == 'error':\n print(item)\n else:\n print('[{}]'.format(','.join(map(str, item))))\n","sub_path":"backjoon/Queue/5430_AC.py","file_name":"5430_AC.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497512953","text":"class DoubleNode: \n def __init__(self, key=None, value=None):\n self.key = key\n self.value = value\n self.next = None\n self.previous = None\n\nclass LRU_Cache: \n def __init__(self, capacity):\n self.hashmap = dict()\n self.capacity = capacity\n self.head = DoubleNode(0,0)\n self.tail = DoubleNode(0,0)\n self.head.next = self.tail\n self.tail.previous = self.head\n \n def get(self, key):\n if key not in self.hashmap:\n return -1\n \n if key in self.hashmap:\n node = self.hashmap[key]\n self._remove(node)\n self._insert(node)\n return node.value\n \n def put(self, key, value):\n if key in self.hashmap:\n self._remove(self.hashmap[key])\n \n if key not in self.hashmap:\n node = DoubleNode(key, value)\n self._insert(node)\n self.hashmap[key] = node\n \n if len(self.hashmap) > self.capacity:\n node = self.head.next \n self._remove(node)\n \n if node.key is None:\n del self.hashmap[node.key] \n \n def _remove(self, node): # cite: 1 \n if self.head is None or node 
is None:\n return \n \n if self.head == node:\n self.head = node.next\n \n if node.next is not None:\n node.next.previous = node.previous\n \n if node.previous is not None:\n node.previous.next = node.next\n \n def _insert(self, node):\n node.next = self.head\n node.previous = None\n\n if self.head is not None:\n self.head.previous = node\n self.head = node\n\n\n# Test Case 1 - Normal \ncache_one = LRU_Cache(5)\ncache_one.put(1, 3)\ncache_one.put(2, 4) \nprint('test_1_get1', cache_one.get(1)) # returns 3\nprint('test_1_get2', cache_one.get(2)) # returns 4\nprint('test_1_get3', cache_one.get(3)) # return -1\ncache_one.put(3, 5)\nprint('test_1_get2', cache_one.get(2)) # returns 4\ncache_one.put(4, 5)\nprint('test_1_get1', cache_one.get(1)) # returns 3\nprint('test_1_get3', cache_one.get(3)) # returns 5\ncache_one.put(3, 6)\nprint('test_1_get3', cache_one.get(3)) # returns 6\n\n# Test Case 2 - Empty \ncache_two = LRU_Cache(0)\ncache_two.put(1, 3)\ncache_two.put(2, 4) \ncache_two.put(3, 5) \nprint('test_2_get1', cache_two.get(1)) # returns 3\nprint('test_2_get2', cache_two.get(2)) # returns 4\nprint('test_2_get3', cache_two.get(3)) # return 5\ncache_two.put(3, 5)\nprint('test_2_get2', cache_two.get(2)) # returns 4\ncache_two.put(4, 5)\nprint('test_2_get1', cache_two.get(1)) # returns 3\nprint('test_2_get3', cache_two.get(3)) # returns 5\ncache_two.put(3, 6)\nprint('test_2_get3', cache_two.get(3)) # returns 5\n\n# Test Case 3 - Overflow \ncache_three = LRU_Cache(3)\ncache_three.put(1, 3)\ncache_three.put(2, 4)\nprint('test_3_get1', cache_three.get(1)) # returns 3\nprint('test_3_get2', cache_three.get(2)) # returns 4\nprint('test_3_get3', cache_three.get(3)) # returns -1\nprint('test_3_get3', cache_three.get(1)) # returns 3\nprint('test_3_get3', cache_three.get(2)) # returns 4\nprint('test_3_get3', cache_three.get(3)) # returns -1\nprint('test_3_get3', cache_three.get(1)) # returns 3\n\n\n","sub_path":"Show me the data structures/problem_1.py","file_name":"problem_1.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"163788613","text":"import collections\nimport threading\nimport argparse\nimport os\nimport time\nimport re\nimport ctypes\nimport sys\n\nfrom pynput import keyboard\n\nfrom win10toast import ToastNotifier\n\nimport pyaudio\nimport wave\n\ndef on_press_waiting(key):\n global current, currentOkay, frames, text, stream, t, built\n if (key in COMBINATION):\n current.add(key)\n if (all(k in current for k in COMBINATION)):\n currentOkay = True\n\ndef on_release_waiting(key):\n global current, currentOkay, frames, text, stream, t, built\n try:\n current.remove(key)\n except KeyError:\n pass\n if (currentOkay and len(current) == 0):\n currentOkay = False\n print(\"Recording name? 
\", end=\"\", flush=True)\n text = \"\"\n with keyboard.Listener(on_press=on_press_capturing, suppress=True) as capturer:\n capturer.join()\n text = re.sub(\"[^a-zA-Z0-9 -]\", \"\", text)\n if (not len(text)):\n text = str(time.time())\n if (args.path):\n string = args.path + text + \".wav\"\n else:\n if (not os.path.isdir(\"clips\")):\n os.makedirs(\"clips\", exist_ok=True)\n string = \"clips/\" + text + \".wav\"\n while (True):\n try:\n wf = wave.open(string, \"wb\")\n print(f\"Saved to {text}.wav \")\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b\"\".join(frames))\n wf.close()\n frames = collections.deque(\"\", int(RATE / CHUNK * RECORD_SECONDS))\n print(\"* recording\", end=\"\\r\", flush=True)\n if (not args.quiet):\n if (built):\n if (not t.show_toast(\"Recording Saved Succcessfully\", f\"{text}.wav\", threaded=True, duration=args.alert, icon_path=os.path.join(sys._MEIPASS, \"icon.ico\"))):\n print(\"Skipped notification because there was one already active.\")\n else:\n if (not t.show_toast(\"Recording Saved Succcessfully\", f\"{text}.wav\", threaded=True, duration=args.alert)):\n print(\"Skipped notification because there was one already active.\")\n break\n except:\n if (not args.quiet):\n if (not t.show_toast(\"Recording Failed\", \"Maybe try again or something\", threaded=True, duration=args.alert, icon_path=os.path.join(sys._MEIPASS, \"icon.ico\"))):\n print(\"Skipped notification because there was one already active.\")\n\ndef on_press_capturing(key):\n global current, currentOkay, frames, text, stream, t, built\n try:\n text += key.char\n print(key.char, end=\"\", flush=True)\n except AttributeError:\n pass\n if (key == keyboard.Key.space):\n text += \" \"\n print(\" \", end=\"\", flush=True)\n if (key == keyboard.Key.backspace):\n if (len(text) > 0):\n print(\"\\b \\b\", end=\"\", flush=True)\n if (len(text) > 1):\n text = text[:-1]\n else:\n text = \"\"\n if (key == keyboard.Key.enter):\n print(\"\\r\", end=\"\", flush=True)\n return False\n\nclass AudioGatherer(threading.Thread):\n def run(self):\n global current, currentOkay, frames, text, stream, t, built\n print(\"* recording\", end=\"\\r\", flush=True)\n while True:\n data = stream.read(CHUNK)\n frames.append(data)\n\nglobal current, currentOkay, frames, text, stream, t, built\n\nctypes.windll.kernel32.SetConsoleTitleW(\"EchoPlay\")\n\nif hasattr(sys, \"frozen\") and getattr(sys, \"frozen\") and hasattr(sys, \"_MEIPASS\"):\n built = True\nelse:\n built = False\n\nt = ToastNotifier()\n\nparser = argparse.ArgumentParser(description=\"ShadowPlay for your ears?\\nThe hotkey is right alt + f11.\")\n\nparser.add_argument(\"-l\", \"--list\", help=\"display device list\", action=\"store_true\")\nparser.add_argument(\"-s\", \"--seconds\", help=\"how many seconds should be recorded. defaults to 300 (5 min)\", default=300, type=int)\nparser.add_argument(\"-q\", \"--quiet\", help=\"disable notification when a recording is saved\", action=\"store_true\")\nparser.add_argument(\"-a\", \"--alert\", help=\"how many seconds should the alert stick around for. defaults to 4\", default=4, type=int)\nparser.add_argument(\"-p\", \"--path\", help=\"where should the clips go. defaults to new folder \\\"clips\\\" in exe's path\")\nparser.add_argument(\"-d\", \"--device\", help=\"id of sound device to use. defaults to default input device. can be a string to search for\")\nparser.add_argument(\"-c\", \"--channels\", help=\"how many channels does the device need. 
defaults to detected max channels\", type=int)\nparser.add_argument(\"-r\", \"--rate\", help=\"sample rate. defaults to detected rate\", type=int)\nparser.add_argument(\"-b\", \"--bits24\", help=\"use 24 bit integer sound instead of 16\", action=\"store_true\")\n\nargs = parser.parse_args()\n\np = pyaudio.PyAudio()\n\nif (args.list):\n from prettytable import PrettyTable\n hosts = PrettyTable([\"index\", \"structVersion\", \"type\", \"name\", \"deviceCount\", \"defaultInputDevice\", \"defaultOutputDevice\"])\n devices = PrettyTable([\"index\", \"structVersion\", \"name\", \"hostApi\", \"maxInputChannels\", \"maxOutputChannels\", \"defaultLowInputLatency\", \"defaultLowOutputLatency\", \"defaultHighInputLatency\", \"defaultHighOutputLatency\", \"defaultSampleRate\"])\n\n for i in range(0, p.get_host_api_count()):\n hosts.add_row(p.get_host_api_info_by_index(i).values())\n \n for i in range(0, p.get_device_count()):\n if (p.get_device_info_by_index(i)[\"maxInputChannels\"] > 0):\n devices.add_row(p.get_device_info_by_index(i).values())\n\n print(hosts.get_string(fields=[\"index\", \"type\", \"name\", \"deviceCount\", \"defaultInputDevice\", \"defaultOutputDevice\"]))\n print(devices.get_string(fields=[\"index\", \"name\", \"hostApi\", \"maxInputChannels\", \"maxOutputChannels\"]))\n sys.exit()\n\nDEVICE = p.get_default_input_device_info()[\"index\"]\nif (args.device):\n if (args.device.isdigit()):\n DEVICE = int(args.device)\n else:\n count = p.get_device_count()\n found = False\n for i in range(0, count):\n device = p.get_device_info_by_index(count - 1 - i)\n if (device[\"maxInputChannels\"] > 0 and args.device in device[\"name\"]):\n DEVICE = count - 1 - i\n found = True\n if (not found):\n raise ValueError(\"Cant find that device\")\ndevice = p.get_device_info_by_index(DEVICE)\nCHANNELS = args.channels or device[\"maxInputChannels\"]\nRATE = args.rate or int(device[\"defaultSampleRate\"])\nRECORD_SECONDS = args.seconds\nCHUNK = 1024\nif (args.bits24):\n FORMAT = pyaudio.paInt24\nelse:\n FORMAT = pyaudio.paInt16\n\nCOMBINATION = {keyboard.Key.alt_r, keyboard.Key.f11}\ncurrent = set()\ncurrentOkay = False\n\nname = device[\"name\"]\nprint(f\"Recording \\\"{name}\\\" for {RECORD_SECONDS} seconds on {CHANNELS} channels at {RATE}HZ\")\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK,\n input_device_index=DEVICE)\n\nframes = collections.deque(\"\", int(RATE / CHUNK * RECORD_SECONDS))\n\naudioGatherer = AudioGatherer()\naudioGatherer.start()\n\nwith keyboard.Listener(on_press=on_press_waiting, on_release=on_release_waiting) as listener:\n listener.join()\n\n# stream.stop_stream()\n# stream.close()\n# p.terminate()\n","sub_path":"EchoPlay.py","file_name":"EchoPlay.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"71701648","text":"from django.db.models import Avg, Min, Sum\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import exceptions, serializers, validators\n\nfrom area.models import City, District\nfrom area.serializers import CitySerializer, DistrictSerializer\nfrom verification.models import MobileVerificationCode\nfrom utils.serializers import I18NCharField, NestedSerializerMixin, RelatedSerializerField\n\nfrom review.models import *\nfrom order.models import *\n\nfrom .models import *\n\nclass AccountSerializer(serializers.Serializer):\n mobile = 
serializers.CharField(\n max_length=255,\n write_only=True,\n )\n code = serializers.CharField(\n max_length=6,\n write_only=True,\n )\n password = serializers.CharField(\n max_length=255,\n write_only=True,\n )\n first_name = serializers.CharField(\n max_length=255,\n required=False,\n write_only=True,\n )\n last_name = serializers.CharField(\n max_length=255,\n required=False,\n write_only=True,\n )\n is_client = serializers.BooleanField(\n default=True,\n write_only=True,\n )\n is_instructor = serializers.BooleanField(\n default=False,\n write_only=True,\n )\n\n def validate_mobile(self, value):\n if User.objects.filter(mobile=value).exists():\n detail = _('This mobile phone number has been registered.')\n raise serializers.ValidationError(detail=detail)\n\n return value\n\n def validate(self, data):\n mobile = data.get('mobile')\n code = data.pop('code')\n\n if MobileVerificationCode.objects.filter(mobile=mobile, code=code).exists():\n mobile_verification_code = MobileVerificationCode.objects.get(mobile=mobile, code=code)\n\n if mobile_verification_code.is_valid():\n return data\n\n detail = _('Verification code expired.')\n raise exceptions.AuthenticationFailed(detail=detail)\n\n detail = _('Invalid verification code.')\n raise exceptions.AuthenticationFailed(detail=detail)\n\n raise ValidationError()\n\n def create(self, validated_data):\n return User.objects.create_user(**validated_data)\n\nclass PasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(\n max_length=255,\n write_only=True,\n )\n password = serializers.CharField(\n max_length=255,\n write_only=True,\n )\n\n def update(self, instance, validated_data):\n if not instance.check_password(validated_data['old_password']):\n detail = _('Invalid password.')\n raise exceptions.AuthenticationFailed(detail=detail)\n \n instance.set_password(validated_data['password'])\n instance.save()\n return instance\n\nclass AddressSerializer(NestedSerializerMixin, serializers.ModelSerializer):\n display_address = serializers.SerializerMethodField(\n read_only=True,\n )\n\n def get_display_address(self, obj):\n return '%(formatted_address)s %(apt)s' % {\n 'formatted_address': obj.formatted_address,\n 'apt': obj.apt,\n }\n\n class Meta:\n model = Address\n fields = (\n 'pk',\n 'display_address',\n 'formatted_address',\n 'latitude',\n 'longitude',\n 'province',\n 'city',\n 'district',\n 'township',\n 'neighborhood',\n 'building',\n 'apt',\n 'adcode',\n 'is_primary',\n )\n read_only_fields = (\n 'pk',\n 'display_address',\n )\n\nclass LanguageSerializer(NestedSerializerMixin, serializers.ModelSerializer):\n class Meta:\n model = Language\n fields = (\n 'pk',\n 'language',\n 'level',\n )\n read_only_fields = (\n 'pk',\n )\n\nclass ImageSerializer(NestedSerializerMixin, serializers.ModelSerializer):\n class Meta:\n model = Image\n fields = (\n 'pk',\n 'image',\n )\n read_only_fields = (\n 'pk',\n )\n\nclass VideoSerializer(NestedSerializerMixin, serializers.ModelSerializer):\n class Meta:\n model = Video\n fields = (\n 'pk',\n 'url',\n )\n read_only_fields = (\n 'pk',\n )\n\nclass UserAvatarSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'avatar',\n )\n\nclass UserPortraitSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'portrait',\n )\n\nclass UserSerializer(serializers.ModelSerializer):\n profession = I18NCharField()\n motto = I18NCharField()\n detail = I18NCharField()\n city = RelatedSerializerField(\n 
queryset=City.objects.filter(is_active=True),\n serializer=CitySerializer,\n )\n d2d_districts = RelatedSerializerField(\n many=True,\n queryset=District.objects.filter(is_active=True),\n serializer=DistrictSerializer,\n )\n primary_address = serializers.SerializerMethodField(\n read_only=True,\n )\n profile_integrity = serializers.SerializerMethodField(\n read_only=True,\n )\n minimum_hourly_rate = serializers.SerializerMethodField(\n read_only=True,\n )\n popularity = serializers.SerializerMethodField(\n read_only=True,\n )\n rating = serializers.SerializerMethodField(\n read_only=True,\n )\n hours = serializers.SerializerMethodField(\n read_only=True,\n )\n my_client_count = serializers.SerializerMethodField(\n read_only=True,\n )\n my_instructor_count = serializers.SerializerMethodField(\n read_only=True,\n )\n\n def get_primary_address(self, obj):\n ret = obj.addresses.filter(is_primary=True, is_active=True).first()\n if ret is not None:\n return AddressSerializer(ret).data\n\n ret = obj.addresses.filter(is_active=True).first()\n if ret is not None:\n return AddressSerializer(ret).data\n\n return None\n\n def get_profile_integrity(self, obj):\n ret = 0\n\n if getattr(obj, 'email'):\n ret += 10\n\n if getattr(obj, 'avatar'):\n ret += 20\n\n if getattr(obj, 'first_name') and getattr(obj, 'last_name'):\n ret += 20\n\n if getattr(obj, 'date_of_birth'):\n ret += 10\n\n if getattr(obj, 'gender') and getattr(obj, 'gender') != 'C':\n ret += 10\n\n if getattr(obj, 'nationality'):\n ret += 10\n\n if getattr(obj, 'profession').get('en') and getattr(obj, 'profession').get('zh-hans'):\n ret += 10\n\n if getattr(obj, 'detail'):\n ret += 10\n\n return ret\n\n def get_minimum_hourly_rate(self, obj):\n ret = obj.courses.filter(\n is_active=True,\n ).aggregate(\n Min('hourly_rate'),\n )['hourly_rate__min']\n \n if ret is not None:\n return ret\n\n return 0 \n\n def get_popularity(self, obj):\n return obj.liked_by.count()\n\n def get_rating(self, obj):\n ret = Review.objects.filter(\n order__course_snapshot__course__instructor=obj,\n is_active=True,\n ).aggregate(\n Avg('rating'),\n )['rating__avg']\n \n if ret is not None:\n return ret * 5.0 / 3.0\n\n return 0.0\n\n def get_hours(self, obj):\n ret = Order.objects.filter(\n course_snapshot__course__instructor=obj,\n status__in=[Order.COMPLETED, Order.REVIEWED],\n ).aggregate(\n Sum('schedule_snapshot__duration'),\n )['schedule_snapshot__duration__sum']\n\n if ret is not None:\n return int(ret.total_seconds()) / 3600\n\n return 0\n\n def get_my_client_count(self, obj):\n return obj.get_my_clients().count()\n\n def get_my_instructor_count(self, obj):\n return obj.get_my_instructors().count()\n\n class Meta:\n model = User\n fields = (\n 'pk',\n 'uuid',\n 'mobile',\n 'email',\n 'avatar',\n 'portrait',\n 'signature',\n 'first_name',\n 'last_name',\n 'date_of_birth',\n 'gender',\n 'nationality',\n 'profession',\n 'motto',\n 'detail',\n 'city',\n 'd2d_surcharge',\n 'd2d_districts',\n 'primary_address',\n 'profile_integrity',\n 'minimum_hourly_rate',\n 'popularity',\n 'rating',\n 'hours',\n 'my_client_count',\n 'my_instructor_count',\n 'is_client',\n 'is_instructor',\n 'is_approved',\n 'is_active',\n 'created_at',\n 'updated_at',\n )\n read_only_fields = (\n 'pk',\n 'uuid',\n 'mobile',\n 'avatar',\n 'portrait',\n 'primary_address',\n 'profile_integrity',\n 'minimum_hourly_rate',\n 'popularity',\n 'rating',\n 'hours',\n 'my_client_count',\n 'my_instructor_count',\n 'is_approved',\n 'is_active',\n 'created_at',\n 'updated_at',\n 
)","sub_path":"pingo_space/account/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":9495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"174891686","text":"import RPi.GPIO as GPIO\nfrom time import sleep\n\nclass PPicar:\n\t#const variable\t\n\tMOTOR_LEFT_A = 12\n\tMOTOR_LEFT_B = 11\n\tMOTOR_LEFT_PWM = 35\n\n\tMOTOR_RIGHT_A = 15\n\tMOTOR_RIGHT_B = 13\n\tMOTOR_RIGHT_PWM = 37\n\n\n\t#base setting\n\tGPIO.setwarnings(False)\n\n\tGPIO.setup(MOTOR_LEFT_A, GPIO.out)#set left motor pin to output\n\tGPIO.setup(MOTOR_LEFT_B, GPIO.out)\n\tGPIO.setup(MOTOR_LEFT_PWM, GPIO.out)\n\n\tGPIO.setup(MOTOR_RIGHT_A, GPIO.out)#set right moto pin to output\n\tGPIO.setup(MOTOR_RIGHT_B, GPIO.out)\n\tGPIO.setup(MOTOR_RIGHT_PWM, GPIO.out)\n\n\tLeftPwm=GPIO.PWM(MOTOR_LEFT_PWM, 100)#create PWM\n\tRightPwm=GPIO.PWM(MOTOR_RIGHT_PWM, 100)\n\t\n\t#constructor\n\tdef __init__(self):\n\t\tprint(\"vroom!\")\n\n\t#public method\n\tdef go(leftDirection, rightDirection, leftSpeed, rightSpeed, duration_time):\n\t\tsetDirection(leftDirection, rightDirection)#set direction\n\t\tsetSpeed(leftSpeed, rightSpeed)#set speed\n\t\tsleep(duration_time)#set duration time\n\t\n\t#getter\n\t#empty\n\n\n\n\t#private method\n\tdef REVERSE(x):\n\t\treturn not x\n\n\tdef setDirection(leftDirection, rightDirection):\n\t\tGPIO.output(MOTOR_LEFT_A, not leftDirection)#left motor\n\t\tGPIO.output(MOTOR_LEFT_B, leftDirection)\n\n\t\tGPIO.output(MOTOR_RIGHT_A, rightDirection)#right motor\n\t\tGPIO.output(MOTOR_RIGHT_B, not rightDirection)\n\n\tdef setSpeed(leftSpeed, rightSpeed):\n\t\tLeftPwm.ChangeDutyCycle(leftSpeed)\n\t\tRightPwm.ChangeDutyCycle(rightSpeed)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"proj3/protoType/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"108555923","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport pandas as pd\nimport re #正規運算式\nimport math\nimport time\nimport numpy as np \nimport csv\nfrom datetime import datetime, timedelta\nfrom datetime import date\nimport pickle\n\n# In[8]:\n\n#讀入指數價格資料\nIndexprice = pd.read_excel('./Index.xlsx')\n#Indexprice\n\n\n# In[9]:\n\n\n#Indexprice轉換成datetime格式\nfor index,value in Indexprice.iterrows():\n value[\"Date\"] = value[\"Date\"].to_pydatetime().date()\n #print(type(value[\"Unnamed: 0\"]))\n\n# In[10]:\n\n#引入stopword\nstopword = []\nwith open('./stopwords.txt','r',encoding = 'utf8') as file:\n for line in file.readlines():\n line = line.strip()\n stopword.append(line)\n\n#讀入文章\nbbs = pd.read_excel('./bbs.xlsx')\nforum = pd.read_excel('./forum.xlsx')\n#news = pd.read_excel('./news.xlsx')\n\n\n# In[11]:\n\n\n#合併三個文章檔案\ncollections = pd.concat([forum],axis=0, ignore_index=True,sort=False)\n\n\n# In[12]:\n\n#蘋果相關文章(沒有apple和iphone)\n#設定起始日和結束日\n#start_date = datetime(2016,1,1).date()\n#end_date = datetime(2018,12,31).date()\nfiltered_list_apple_noENG = []\nstart_date = datetime(2016,1,1)\nend_date = datetime(2018,12,31)\n\nfor index,value in collections.iterrows():\n if (start_date <= value[\"post_time\"].to_pydatetime()) and (end_date >=value[\"post_time\"].to_pydatetime()):\n if (\"蘋果\" in (str(value[\"content\"])+str(value[\"title\"]))) or(\"蘋概股\" in (str(value[\"content\"])+str(value[\"title\"]))) or (\"台積電\" in (str(value[\"content\"])+str(value[\"title\"]))) or (\"鴻海\" in (str(value[\"content\"])+str(value[\"title\"])))or (\"和碩\" in 
(str(value[\"content\"])+str(value[\"title\"]))) or(\"大立光\" in (str(value[\"content\"])+str(value[\"title\"]))) or (\"可成\" in (str(value[\"content\"])+str(value[\"title\"])))or (\"鴻準\" in (str(value[\"content\"])+str(value[\"title\"]))) or (\"日月光\" in (str(value[\"content\"])+str(value[\"title\"]))) or (\"廣達\" in (str(value[\"content\"])+str(value[\"title\"])))or (\"台郡\" in (str(value[\"content\"])+str(value[\"title\"]))) or(\"臻鼎\" in (str(value[\"content\"])+str(value[\"title\"]))) or (\"穩懋\" in (str(value[\"content\"])+str(value[\"title\"])))or (\"宏捷科\" in (str(value[\"content\"])+str(value[\"title\"]))):\n filtered_list_apple_noENG.append([value[\"post_time\"].to_pydatetime(),str(value[\"title\"])+str(value[\"content\"])])\n\n# In[13]:\n\n\n#len(filtered_list_apple_noENG)\n\n\n# In[16]:\n\n\n#將未開盤日的文章日期改為上一次開盤日(while,按太多次不會怎樣)\nIndex_date = []\nfor i in range(Indexprice.shape[0]):\n Index_date.append(Indexprice.iloc[i][0].date())\n \nfor i in range(len(filtered_list_apple_noENG)):\n if filtered_list_apple_noENG[i][0].date() >= datetime(2016,1,4).date():\n while filtered_list_apple_noENG[i][0].date() not in Index_date:\n filtered_list_apple_noENG[i][0] = filtered_list_apple_noENG[i][0] - timedelta(days=1)\n\n\n# In[18]:\n\n\ndef remove_stopwords(content,n_gram,stopword):\n content = re.sub(r'[^\\w]','',content) #移除非文字字元(符號)\n content = re.sub(r'[A-Za-z0-9]','',content) #移除英文&數字\n \n returnlist = []\n \n for i in range(len(content) - n_gram + 1):\n word = content[i:i + n_gram]\n if word not in stopword:\n returnlist.append(word)\n return returnlist\n\n\n# In[19]:\n\n\ndef tfdf(Dict_n_gram, tfdf_n):\n for key in Dict_n_gram.keys():\n for word in Dict_n_gram[key]:\n if word not in tfdf_n.keys():\n tfdf_n[word] = [1,0]\n if word in tfdf_n.keys():\n tfdf_n[word][0] += 1\n for word1 in set(Dict_n_gram[key]):\n tfdf_n[word1][1] += 1\n return tfdf_n\n\n\n# In[20]:\n\n\ndef get_gram(Dict_n_gram,n,article,stopword):\n for i in article.keys():\n Dict_n_gram[i] = remove_stopwords(article[i],n,stopword)\n return Dict_n_gram\n\n\n# In[21]:\n\n\ndef get_gram_per_article(Dict_n_gram,n,article,stopword):\n Dict_n_gram[0] = remove_stopwords(article,n,stopword)\n return Dict_n_gram\n\n\n# In[22]:\n\n\ndef del_outlier(tfdf_n):\n for key in list(tfdf_n.keys()):\n if tfdf_n[key][1] <= 5 or tfdf_n[key][0] <= 10:\n del tfdf_n[key]\n return tfdf_n\n\n\n# In[23]:\n\n\ndef del_repeat(tfdf_small,tfdf_big):\n for key_small in list(tfdf_small.keys()):\n for key_big in tfdf_big.keys():\n if (key_small in key_big) and (abs((tfdf_big[key_big][0] - tfdf_small[key_small][0])) / tfdf_small[key_small][0] < 0.1):\n del tfdf_small[key_small]\n break\n return tfdf_small\n\n\n# In[67]:\n\n\ndef generate_keyword(article,all_article):\n Dict_2_gram = {}\n Dict_3_gram = {}\n Dict_4_gram = {}\n Dict_5_gram = {}\n Dict_6_gram = {}\n Dict_2_gram = get_gram(Dict_2_gram,2,article,stopword)\n Dict_3_gram = get_gram(Dict_3_gram,3,article,stopword)\n Dict_4_gram = get_gram(Dict_4_gram,4,article,stopword)\n Dict_5_gram = get_gram(Dict_5_gram,5,article,stopword)\n Dict_6_gram = get_gram(Dict_6_gram,6,article,stopword)\n \n print(\"gram\")\n tfdf_2 = {}\n tfdf_3 = {}\n tfdf_4 = {}\n tfdf_5 = {}\n tfdf_6 = {}\n tfdf_2 = del_outlier(tfdf(Dict_2_gram,tfdf_2))\n tfdf_3 = del_outlier(tfdf(Dict_3_gram,tfdf_3))\n tfdf_4 = del_outlier(tfdf(Dict_4_gram,tfdf_4))\n tfdf_5 = del_outlier(tfdf(Dict_5_gram,tfdf_5))\n tfdf_6 = del_outlier(tfdf(Dict_6_gram,tfdf_6))\n #print(tfdf_2)\n \n #tfdf_2 = del_outlier(tfdf_2)\n #tfdf_3 = del_outlier(tfdf_3)\n #tfdf_4 
= del_outlier(tfdf_4)\n #tfdf_5 = del_outlier(tfdf_5)\n #tfdf_6 = del_outlier(tfdf_6)\n #print(tfdf_2)\n print(\"outlier\")\n tfdf_2 = del_repeat(tfdf_2,tfdf_3)\n tfdf_3 = del_repeat(tfdf_3,tfdf_4)\n tfdf_4 = del_repeat(tfdf_4,tfdf_5)\n tfdf_5 = del_repeat(tfdf_5,tfdf_6)\n \n tfdf_2to6 = dict(**tfdf_2 , **tfdf_3, **tfdf_4, **tfdf_5, **tfdf_6)\n \n num_doc = len(Dict_2_gram)\n \n for key in tfdf_2to6.keys():\n tfdf_2to6[key].append((1 + math.log(tfdf_2to6[key][0])) * math.log(num_doc / tfdf_2to6[key][1]))\n \n print(\"tfidf\")\n \n Dict_all = {}\n for i in range(len(all_article)):\n Dict_all[i] = all_article[i][1]\n Dict_all[i] = re.sub(r'[^\\w]','',Dict_all[i]) #移除非文字字元(符號)\n Dict_all[i] = re.sub(r'[A-Za-z0-9]','',Dict_all[i]) #移除英文&數字\n \n for key in tfdf_2to6.keys():\n tfdf_2to6[key].append(0)\n for index in Dict_all.keys():\n tfdf_2to6[key][3] += Dict_all[index].count(key)\n print(\"alldict\") #這裡要跑1hr多\n\n \n num_all = len(Dict_all) \n \n return tfdf_2to6,num_all,num_doc\n #for key in tfdf_2to6.keys():\n #tfdf_2to6[key].append(((tfdf_2to6[key][0] - ((tfdf_2to6[key][3]/num_all) * num_doc)) ** 2)/((tfdf_2to6[key][3]/num_all) * num_doc))\n \n \n #tfdf_2to6 = sorted(tfdf_2to6.items(),key = lambda x:x[1][4], reverse = True) #按照tf卡方排\n \n #return tfdf_2to6[0:200]\n\n\n# In[64]:\n\n\ndef generate_keyword_tf(per_article):\n Dict_2_gram = {}\n Dict_3_gram = {}\n Dict_4_gram = {}\n Dict_5_gram = {}\n Dict_6_gram = {}\n Dict_2_gram = get_gram_per_article(Dict_2_gram,2,per_article,stopword)\n Dict_3_gram = get_gram_per_article(Dict_3_gram,3,per_article,stopword)\n Dict_4_gram = get_gram_per_article(Dict_4_gram,4,per_article,stopword)\n Dict_5_gram = get_gram_per_article(Dict_5_gram,5,per_article,stopword)\n Dict_6_gram = get_gram_per_article(Dict_6_gram,6,per_article,stopword)\n \n tfdf_2 = {}\n tfdf_3 = {}\n tfdf_4 = {}\n tfdf_5 = {}\n tfdf_6 = {}\n tfdf_2 = tfdf(Dict_2_gram,tfdf_2)\n tfdf_3 = tfdf(Dict_3_gram,tfdf_3)\n tfdf_4 = tfdf(Dict_4_gram,tfdf_4)\n tfdf_5 = tfdf(Dict_5_gram,tfdf_5)\n tfdf_6 = tfdf(Dict_6_gram,tfdf_6)\n\n \n tfdf_2 = del_repeat(tfdf_2,tfdf_3)\n tfdf_3 = del_repeat(tfdf_3,tfdf_4)\n tfdf_4 = del_repeat(tfdf_4,tfdf_5)\n tfdf_5 = del_repeat(tfdf_5,tfdf_6)\n \n tfdf_2to6 = dict(**tfdf_2 , **tfdf_3, **tfdf_4, **tfdf_5, **tfdf_6)\n\n return tfdf_2to6\n\n\n# In[70]:\n\nincrease_article = {}\ndecrease_article = {}\nincrease_keyword = []\ndecrease_keyword = []\nincrease_keyword_dict = {}\ndecrease_keyword_dict = {}\n\nprint('標價')\na = 0\nb = 0\nday = 3\nsigma = 1.5\n'''\nfor i in range(day,Indexprice.shape[0] - day):\n if Indexprice.iloc[i,1] - Indexprice.iloc[i + day,1] < (sigma * Indexprice[\"Index by 收盤價\"][i-10:i-1].std() * -1) :\n date1 = Indexprice.iloc[i,0].date()\n #print(type(date1))\n for j in range(len(filtered_list_apple_noENG)):\n #print(type(filtered_list[j][0].date()))\n if filtered_list_apple_noENG[j][0].date() == date1:\n increase_article[a] = filtered_list_apple_noENG[j][1]\n a += 1\n if Indexprice.iloc[i,1] - Indexprice.iloc[i + day,1] > (sigma * Indexprice[\"Index by 收盤價\"][i-10:i-1].std()) :\n date2 = Indexprice.iloc[i,0].date()\n for j in range(len(filtered_list_apple_noENG)):\n if filtered_list_apple_noENG[j][0].date() == date2:\n decrease_article[b] = filtered_list_apple_noENG[j][1]\n b += 1\n\n'''\nday = 3\nmulti = 1\nfor i in range(10,Indexprice.shape[0] - day):\n if Indexprice.iloc[i,4] - Indexprice.iloc[i + day,4] < (multi * Indexprice[\"ATR\"][i] * -1) :\n date1 = Indexprice.iloc[i,0].date()\n #print(type(date1))\n for j in 
range(len(filtered_list_apple_noENG)):\n #print(type(filtered_list[j][0].date()))\n if filtered_list_apple_noENG[j][0].date() == date1:\n increase_article[a] = filtered_list_apple_noENG[j][1]\n a += 1\n if Indexprice.iloc[i,4] - Indexprice.iloc[i + day,4] > (multi * Indexprice[\"ATR\"][i]) :\n date2 = Indexprice.iloc[i,0].date()\n for j in range(len(filtered_list_apple_noENG)):\n if filtered_list_apple_noENG[j][0].date() == date2:\n decrease_article[b] = filtered_list_apple_noENG[j][1]\n b += 1\n\n# In[ ]:\n\n\nnum_all = 0\nnum_doc = 0\nincrease_keyword_dict,num_all,num_doc = generate_keyword(increase_article,filtered_list_apple_noENG)\n\n\n# In[ ]:\n\n\ndecrease_keyword_dict,num_all,num_doc = generate_keyword(decrease_article,filtered_list_apple_noENG)\n\n\n# In[ ]:\n\n\nfor key in increase_keyword_dict.keys():\n if increase_keyword_dict[key][0] - ((increase_keyword_dict[key][3]/num_all) * num_doc) >= 0:\n increase_keyword_dict[key].append(((increase_keyword_dict[key][0] - ((increase_keyword_dict[key][3]/num_all) * num_doc)) ** 2)/((increase_keyword_dict[key][3]/num_all) * num_doc))\n else:\n increase_keyword_dict[key].append((((increase_keyword_dict[key][0] - ((increase_keyword_dict[key][3]/num_all) * num_doc)) ** 2)/((increase_keyword_dict[key][3]/num_all) * num_doc))* -1)\n\nincrease_keyword_dict = sorted(increase_keyword_dict.items(),key = lambda x:x[1][4], reverse = True) #照TF卡方排\n\nfor key in decrease_keyword_dict.keys():\n if decrease_keyword_dict[key][0] - ((decrease_keyword_dict[key][3]/num_all) * num_doc) >= 0:\n decrease_keyword_dict[key].append(((decrease_keyword_dict[key][0] - ((decrease_keyword_dict[key][3]/num_all) * num_doc)) ** 2)/((decrease_keyword_dict[key][3]/num_all) * num_doc))\n else:\n decrease_keyword_dict[key].append((((decrease_keyword_dict[key][0] - ((decrease_keyword_dict[key][3]/num_all) * num_doc)) ** 2)/((decrease_keyword_dict[key][3]/num_all) * num_doc))* -1)\ndecrease_keyword_dict = sorted(decrease_keyword_dict.items(),key = lambda x:x[1][4], reverse = True) #照TF卡方排\n\n\n# In[ ]:\n\n\n#print(increase_keyword_dict[0:300])\n\n\n# In[ ]:\n\n\n#print(decrease_keyword_dict[0:300])\n\n\n# In[ ]:\n\n\nincrease_keyword = increase_keyword_dict[0:500]\ndecrease_keyword = decrease_keyword_dict[0:500]\n\n\n# In[ ]:\n\n\n#刪除重複關鍵字(兩邊都刪??)\nrepeat_keyword_increase = []\nrepeat_keyword_decrease = []\n\nfor i in range(len(increase_keyword)):\n for j in range(len(decrease_keyword)):\n if increase_keyword[i][0] == decrease_keyword[j][0]:\n repeat_keyword_increase.append(increase_keyword[i])\n repeat_keyword_decrease.append(decrease_keyword[i])\nfor i in range(len(repeat_keyword_increase)):\n increase_keyword.remove(repeat_keyword_increase[i])\nfor i in range(len(repeat_keyword_decrease)): \n decrease_keyword.remove(repeat_keyword_decrease[i])\n\n\n# In[ ]:\n\nfor a in increase_keyword:\n print(a)\n\nprint(\"我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線我是分隔線\")\n# In[ ]:\n\nfor b in decrease_keyword:\n print(b)\n\n\n# In[ ]:\n\n\noutput_X = []\noutput_Y = []\nfor key in increase_article.keys():\n per_article = generate_keyword_tf(increase_article[key])\n x = {}\n for i in range(len(increase_keyword)):\n try:\n x[increase_keyword[i][0]] = per_article[increase_keyword[i][0]][0]\n except:\n x[increase_keyword[i][0]] = 0\n for i in range(len(decrease_keyword)):\n try:\n x[decrease_keyword[i][0]] = per_article[decrease_keyword[i][0]][0]\n except:\n x[decrease_keyword[i][0]] = 0\n output_X.append(x)\n output_Y.append([1,0])\n\n\n# In[ ]:\n\n\nfor key in 
decrease_article.keys():\n per_article = generate_keyword_tf(decrease_article[key])\n x = {}\n for i in range(len(increase_keyword)):\n try:\n x[increase_keyword[i][0]] = per_article[increase_keyword[i][0]][0]\n except:\n x[increase_keyword[i][0]] = 0\n for i in range(len(decrease_keyword)):\n try:\n x[decrease_keyword[i][0]] = per_article[decrease_keyword[i][0]][0]\n except:\n x[decrease_keyword[i][0]] = 0\n output_X.append(x)\n output_Y.append([0,1])\n\n\n# In[ ]:\n\n\noutput_x_numpy = np.array(output_X)\noutput_y_numpy = np.array(output_Y)\n\n\n# In[ ]:\n\n\nnp.save('./output_X_news_3d1atr.npy', output_x_numpy)\nnp.save('./output_Y_news_3d1atr.npy', output_y_numpy)\n\nwith open('increaseword_forum_3d1atr.pkl', 'wb') as f: \n pickle.dump(increase_keyword, f)\nwith open('decreaseword_forum_3d1atr.pkl', 'wb') as f: \n pickle.dump(decrease_keyword, f)\n\n","sub_path":"atrword.py","file_name":"atrword.py","file_ext":"py","file_size_in_byte":14155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"151775264","text":"#1086_성적관리프로그램\r\n\r\ndef putScore():\r\n lt=[]\r\n i=0\r\n while(i<7): \r\n n=int(input(\"%d번의 과목 점수를 입력하세요:\"%(1+i)))\r\n if(n>=0 and n<=100):\r\n lt.append(n)\r\n i+=1\r\n else:\r\n print(\"범위를 맞춰주세요\")\r\n continue \r\n return lt\r\n\r\nsubject=[]\r\nsubject=putScore()\r\nsum=0\r\nfor i in range(0,len(subject)):\r\n sum+=subject[i]\r\nprint(\"총점:%d\"%sum)\r\nprint(\"평균:%d\"%(sum/len(subject)))\r\ngreat=subject[0]\r\nsmall=subject[0]\r\nfor i in range(0,len(subject)-1):\r\n if(greatsubject[i+1]):\r\n small=subject[i+1]\r\nprint(\"최대=%d, 최소=%d\"%(great,small))\r\nsubject.append(sum/len(subject))\r\nsubject.append(great)\r\nsubject.append(small)\r\nfor i in range(10,0,-1):\r\n print(\"%2d\"%(i*10),end=\"\")\r\n for t in range(0,10,1):\r\n if (subject[t]/10>=i):\r\n print(\"%2s\"%\"*\",end=\"\")\r\n else:\r\n print(\" \",end=\"\")\r\n print()\r\nprint(\" \",end=\"\") \r\nfor i in range(0,10):\r\n print(\"%c \"%(65+i),end=\"\")\r\n \r\n","sub_path":"1086_성적관리프로그램.py","file_name":"1086_성적관리프로그램.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"411409383","text":"# -*- coding: utf-8 -*-\n# LIBRARY OF CaCo\n\n'''\nCanopy Cover (CaCo) V0.1\n\n===========================================================\nAn objective image analysis method for estimation of canopy\nattributes from digital cover photography\n\n* author: Alessandro Alivernini \n* paper: https://doi.org/10.1007/s00468-018-1666-3\n* git: https://github.com/alivernini/caco\n\n===========================================================\n\nCanopy Cover (CaCo)\nCopyright 2017-2018 Council for Agricultural Research and Economics\n\nPermission is hereby granted, free of charge, to any person\nobtaining a copy of this software and associated documentation\nfiles (the \"Software\"), to deal in the Software without restriction,\nincluding without limitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of the Software,\nand to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\nOF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n'''\n\n\n# @@ MODULE IMPORT\nimport os\nimport numpy as np\nimport pandas as pds\nimport rawpy\n\nimport scipy.ndimage\nfrom scipy import misc\n\nimport skimage\nimport skimage.io\nimport skimage.filters\n\nfrom .threshold_minimum import *\n\n#import skimage.feature\n#from skimage.measure import label\n\n\nclass CacoImg():\n '''\n Assess the gap fraction for a single img\n '''\n\n def __init__(self, param3, input_image): # @@\n ''' <1> read image and set the output dir '''\n self.param3 = param3\n self.warning = [] # store output errors\n\n self.path = input_image\n self.read_img(input_image)\n\n\n def run(self): # @@\n '''\n Performs all the gap fraction computations\n '''\n if self.check_warning(): return\n self.select_band()\n self.apply_threshold()\n if self.check_warning(): return\n self.get_regions()\n return self.assess_gap_fraction()\n\n print(\"CaCo img complete\")\n\n\n\n def select_band(self): # @@\n ''' <2> define the band (or the combination) used '''\n band = self.param3['band']\n\n bs_setter = {\n 'grey' : self.set_grey,\n 'greeness' : self.set_greeness,\n 'red' : [self.set_rgb, 1],\n 'green' : [self.set_rgb, 2],\n 'blue' : [self.set_rgb, 3],\n }\n setter = bs_setter[band]\n try:\n setter()\n except: # rgb case\n setter[0](setter[1])\n\n self.selection_shape = self.selection_img.shape\n self.image = None\n\n def apply_threshold(self): # @@\n ''' <3> apply threshold to selected band '''\n\n # SELECT THRESHOLD\n img = self.selection_img\n th_setter = {\n 'otzu': skimage.filters.threshold_otsu,\n 'isodata': skimage.filters.threshold_isodata,\n 'minimum': [threshold_minimum, 128]\n }\n setter = th_setter[self.param3['threshold']]\n try:\n threshold = setter(self.selection_img)\n except(TypeError): # the function has 2+ arguments\n try:\n threshold = setter[0](self.selection_img, setter[1])\n except Exception as e:\n print(e)\n threshold = skimage.filters.threshold_isodata(img)\n\n # APPLY THRESHOLD\n self.selection_img = img > threshold\n\n def get_regions(self): # @@\n ''' <4> define regions for gap pixels '''\n self.region, self.id = scipy.ndimage.measurements.label(self.selection_img)\n\n def assess_gap_fraction(self): # @@\n ''' <5> assess gap fraction '''\n # trick 1: img to vector\n region = self.region.flatten()\n shape_flatten = region.shape\n # trick 2: get the direct mapping of pixel having the the same label [see previous step]\n sort_index = np.argsort(region)\n # trick 3: get the reverse mapping for the happy ending\n reverse_sorted_index = np.argsort(sort_index)\n #--------------------------------------------------------------------------------\n # APPLY DIRECT MAPPING TO SAMPLE THE SIZE OF EACH IMG REGION\n sorted_region = region[sort_index]\n img = self.selection_img.flatten()[sort_index]\n #----------------------------------------------------------------------------\n # TWO LOOPS ARE PERFOMED:\n # 1. to define the threshold dividing normal gaps from big gaps;\n # 2. 
to sample the size of the gaps for the normal gaps\n\n big_gaps_defined = False # True at the end of the 1st loop\n output = np.zeros(shape_flatten) # stores the values of the output image\n gap_size_2 = [] # big and normal gaps\n normal_gap_size_2 = []\n big_gap_size_2 = []\n for unused in range(2):\n start = 0\n img_end = shape_flatten[0] - 1 # total of pixels -1\n np_sum = np.sum # small trick for speed\n # -->> loop start x 2\n while start < img_end:\n fid = sorted_region[start] # fid is equal to the new label value [remember get_regions?]\n end = start\n if end == img_end:\n break\n # --------------------------------------------------------------------------------\n # end continues to move 1 step ahead while the labelis equal\n while fid == sorted_region[end] and end < img_end:\n end += 1 # end finishes one step beyond the edge\n # --------------------------------------------------------------------------------\n if fid > 0: # this is not vegetation\n stat = np_sum(img[start:end-1]) # start and end are at the extremes of one label [end is 1 step beyond]\n if not big_gaps_defined: # 1st loop case\n gap_size_2.append(stat)\n else: # 2nd loop case\n if stat > big_gap_size:\n output[start:end-1] = 2\n big_gap_size_2.append(stat)\n else:\n output[start:end-1] = 1\n normal_gap_size_2.append(stat)\n start = end # end was already on a new label value\n # <<-- loop end x 2\n if not big_gaps_defined: # 1st loop case: compute stats\n big_gaps_defined = True\n gap_size_20 = np.array(gap_size_2)\n gap_mean = gap_size_20.mean()\n gap_std_dev = gap_size_20.std()\n gap_std_err = float(gap_std_dev) / float(len(gap_size_20))**0.5\n big_gap_size = gap_mean + gap_std_err\n gap_px = gap_size_20.sum()\n else:\n # finish loop and complete the statistics\n pass\n\n # normal gap statistics\n tmp = np.array(normal_gap_size_2)\n normal_gap_px = tmp.sum()\n normal_gap_std_dev = tmp.std()\n normal_gap_number = tmp.size\n normal_gap_std_err = normal_gap_std_dev / normal_gap_number**0.5\n\n # big gap statistics\n tmp = np.array(big_gap_size_2)\n big_gap_px = tmp.sum()\n big_gap_std_dev = tmp.std()\n big_gap_number = tmp.size\n big_gap_std_err = big_gap_std_dev / big_gap_number**0.5\n\n # trick 4: get the bunny out of the hat\n output = output[reverse_sorted_index]\n output = output.reshape(self.selection_shape)\n\n #TODO\n self.write_output_img(output)\n\n # compute the gap fraction\n leaf_px = (output.shape[0] * output.shape[1]) - normal_gap_px - big_gap_px\n image_px = output.shape[0] * output.shape[1]\n total_gap_fraction = float(gap_px) / float(image_px)\n large_gap_fraction = float(big_gap_px)/float(image_px)\n foliage_cover = float(1 - total_gap_fraction)\n canopy_cover = float(1 - large_gap_fraction)\n crown_porosity = 1 - foliage_cover / canopy_cover\n\n # prepare the dictionary for the desired statistics\n data_output = {\n 'normal_gap_px' : normal_gap_px,\n 'normal_gap_mean' : float(normal_gap_px)/ normal_gap_number,\n 'normal_gap_std_dev' : normal_gap_std_dev,\n 'normal_gap_std_err' : normal_gap_std_err,\n 'normal_gap_number' : normal_gap_number,\n\n 'big_gap_px' : big_gap_px,\n 'big_gap_mean' : float(big_gap_px) / big_gap_number,\n 'big_gap_std_dev' : big_gap_std_dev,\n 'big_gap_std_err' : big_gap_std_err,\n 'big_gap_number' : big_gap_number,\n\n 'total_gap_px' : gap_px,\n 'total_gap_mean' : gap_mean,\n 'total_gap_std_dev' : gap_std_dev,\n 'total_gap_std_err' : gap_std_err,\n 'total_gap_number' : normal_gap_number + big_gap_number,\n\n 'leaf_px' : leaf_px,\n 'image_px' : image_px,\n 'total_gap_fraction' 
: total_gap_fraction,\n 'large_gap_fraction' : large_gap_fraction,\n 'foliage_cover' : foliage_cover,\n 'canopy_cover' : canopy_cover,\n 'crown_porosity' : crown_porosity\n }\n return data_output\n\n #--------------------------------------------------------------------------------\n ## Setter methods ##\n\n def set_greeness(self): # @@\n ''' select greeness band combination'''\n self.selection_img = (\n self.image[:, :, 1]\n * 2 - (self.image[:, :, 0]\n + self.image[:, :, 2])\n )\n\n # https://it.mathworks.com/help/matlab/ref/rgb2gray.html\n def set_grey(self): # @@\n ''' select grey band combination'''\n self.selection_img = (\n self.image[:, :, 0] * 0.2989 +\n self.image[:, :, 1] * 0.5870 +\n self.image[:, :, 2] * 0.1140\n ).astype(self.image.dtype)\n\n\n def set_rgb(self, band): # @@\n ''' select one band '''\n self.selection_img = self.image[:, :, band]\n #--------------------------------------------------------------------------------\n ## Manage warning ##\n\n def check_warning(self):\n warning = False\n if self.warning:\n warning = True\n for wkey in self.warning:\n print(wkey)\n return warning\n\n #--------------------------------------------------------------------------------\n ## Input/output methods ##\n\n def write_output_img(self, gap_fraction_img): # @@\n o_dir = self.param3['output_dir']\n if not os.path.exists(o_dir):\n os.mkdir(o_dir)\n fileName = os.path.basename(self.path)\n fileName = fileName.split('.')[0] + '.jpg'\n path = os.path.join(self.param3['output_dir'],self.param3['th_dir'], fileName)\n misc.imsave(path, gap_fraction_img)\n\n def read_img(self, input_image): # @@\n ''' read the image and set the output dir '''\n try:\n if self.param3['raw_processing'] == True:\n raw = rawpy.imread(input_image)\n self.image = raw.postprocess()\n else: # read common image formats\n self.image = scipy.misc.imread(input_image)\n except Exception as e: # reading failed\n print(e)\n self.warning.append('inputW')\n\n","sub_path":"caco/library_cc.py","file_name":"library_cc.py","file_ext":"py","file_size_in_byte":12089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"382749936","text":"\nimport pathlib\nimport zipfile\nimport sqlite3\nfrom datetime import datetime\nimport time\nimport zlib\nimport json\nimport html\n\nimport pytz\nimport click\n\nfrom pynyaa import create_app, db, models\n\nconfig_file = pathlib.Path('config/development.py')\napp = create_app(config_file.absolute())\n\n\ndef extract_comment(text):\n if text.startswith('
    '):\n text = text[19:]\n if text.endswith('
    '):\n text = text[:-6]\n return text\n\n\ndef normalize_filesize(size):\n \"\"\"Takes string like \"123 MiB\" and converts it into integer of bytes.\"\"\"\n if size is None or not size:\n return 0\n size, suffix = size.rsplit(maxsplit=1)\n try:\n size = float(size.strip())\n except ValueError:\n return 0\n suffix_map = dict(b=0, kib=1, mib=2, gib=3, tib=4)\n suffix = suffix.lower()\n if suffix not in suffix_map:\n return int(size)\n return int(size * 1024**suffix_map[suffix])\n\n\n@app.cli.command()\n@click.argument('path')\n@click.argument('destination')\ndef import_sqlite(path, destination='import'):\n db.drop_all()\n db.create_all()\n\n filename = 'merged.sqlite3'\n destination = pathlib.Path(destination).absolute()\n sqlite_file = pathlib.Path(destination, filename)\n\n if not sqlite_file.exists():\n destination.mkdir(0o755, True, True)\n zf = zipfile.ZipFile(path)\n zf.extract(filename, destination)\n\n with sqlite3.connect(str(sqlite_file)) as conn:\n cursor = conn.cursor()\n\n cat_id_max, = cursor.execute('SELECT MAX(category_id) FROM categories').fetchone()\n status_id_max, = cursor.execute('SELECT MAX(status_id) FROM statuses').fetchone()\n subcat_id_max, = cursor.execute('SELECT MAX(sub_category_id) FROM sub_categories').fetchone()\n torrent_id_max, = cursor.execute('SELECT MAX(torrent_id) FROM torrents').fetchone()\n\n # On import, those tables will all have an explicit id specified.\n # For new records we reset the sequence counter.\n # We do this before importing to allow inserting of new records during the import.\n db.engine.execute(f'ALTER SEQUENCE category_id_seq RESTART WITH {cat_id_max+1}')\n db.engine.execute(f'ALTER SEQUENCE status_id_seq RESTART WITH {status_id_max+1}')\n db.engine.execute(f'ALTER SEQUENCE sub_category_id_seq RESTART WITH {subcat_id_max+1}')\n db.engine.execute(f'ALTER SEQUENCE torrent_id_seq RESTART WITH {torrent_id_max+1}')\n\n for row in cursor.execute('SELECT category_id, category_name FROM categories'):\n db.session.add(models.Category(**dict(zip(('id', 'name'), row))))\n\n for row in cursor.execute('SELECT status_id, status_name FROM statuses'):\n status_dict = dict(zip(('id', 'name'), row))\n if status_dict['name'] == 'a+':\n status_dict['name'] = 'aplus'\n\n status_dict['label'] = dict(\n normal='Normal',\n remake='Filter Remakes',\n trusted='Trusted',\n aplus='A+',\n ).get(status_dict['name'], status_dict['name'])\n status = models.Status(**status_dict)\n db.session.add(status)\n\n for row in cursor.execute('SELECT sub_category_id, parent_id, sub_category_name '\n 'FROM sub_categories'):\n db.session.add(models.SubCategory(**dict(zip(('id', 'parent_id', 'name'), row))))\n db.session.commit()\n\n # now the torrents\n tbl_fields = ['torrent_id', 'torrent_name', 'torrent_hash', 'category_id',\n 'sub_category_id', 'status_id', 'date', 'downloads', 'stardom', 'filesize',\n 'description', 'website_link', 'comments']\n fields = tbl_fields[:]\n fields[0:3] = ['id', 'name', 'hash']\n\n torrent_count, = cursor.execute('SELECT COUNT(*) FROM torrents').fetchone()\n\n users = {}\n user_status = {}\n start_time = datetime.now(pytz.utc)\n\n for torrent_number, row in enumerate(\n cursor.execute(f'SELECT {\",\".join(tbl_fields)} FROM torrents'),\n start=1):\n rowdict = dict(zip(fields, row))\n\n if not rowdict['name']:\n continue\n\n rowdict['filesize'] = normalize_filesize(rowdict['filesize'])\n if rowdict['filesize'] == 0:\n continue\n\n if isinstance(rowdict['hash'], bytes):\n rowdict['hash'] = rowdict['hash'].decode('ascii')\n if rowdict['hash'] is 
None or len(rowdict['hash']) != 40:\n continue\n\n rowdict['hash'] = str(rowdict['hash']).lower()\n\n if rowdict['date'] is None:\n rowdict['date'] = int(time.time())\n rowdict['date'] = datetime.fromtimestamp(rowdict['date'], pytz.utc)\n\n if rowdict['description']:\n try:\n rowdict['description'] = zlib.decompress(rowdict['description']).decode('utf-8')\n except Exception as exc:\n rowdict['description'] = None\n else:\n rowdict['description'] = None\n\n rowdict['is_sqlite_import'] = True\n\n comment_list = []\n if rowdict['comments']:\n try:\n comments = json.loads(zlib.decompress(rowdict['comments']).decode('utf-8'))\n except Exception as exc:\n comments = []\n\n for json_comment in comments:\n json_comment['un'] = html.unescape(json_comment['un'])\n\n comment = models.Comment(\n id=int(json_comment['id'].lstrip('c')),\n text=extract_comment(json_comment['c']),\n av=json_comment['av'],\n date=datetime.fromtimestamp(json_comment['t'], pytz.utc),\n old_user_name=json_comment['un'],\n user_id=None\n )\n\n if json_comment['ui'] not in users:\n\n if json_comment['us'] not in user_status:\n user_status[json_comment['us']] = models.UserStatus(\n name=json_comment['us']\n )\n db.session.add(user_status[json_comment['us']])\n\n if json_comment['us'] != 'User':\n user = models.User()\n user.name = json_comment['un']\n user.status = user_status[json_comment['us']]\n\n db.session.add(user)\n users[json_comment['ui']] = user\n\n if json_comment['ui'] in users:\n comment.old_user_name = None\n comment.user = users[json_comment['ui']]\n\n comment_list.append(comment)\n rowdict['comments'] = comment_list\n db.session.add(models.Torrent(**rowdict))\n\n # time already spent\n delta_time = datetime.now(pytz.utc) - start_time\n # time per torrent\n tpt = delta_time.total_seconds() / torrent_number\n rest = int((torrent_count - torrent_number) * tpt)\n eta = f'{rest//60:02d}:{rest%60:02d}'\n\n print(f'{torrent_number/torrent_count*100:.2f} % -'\n f' {torrent_number} / {torrent_count} - ETA: {eta}',\n end=' \\r')\n if torrent_number % 100 == 0:\n db.session.commit()\n if torrent_number % 10000 == 0:\n print()\n\n db.session.commit()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"26946975","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\nimport operator\n\ndataset = pd.read_csv('diabetes.csv')\nX = dataset.iloc[:, :9].values\n\n\n\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test = train_test_split(X,test_size = 0.20, random_state = 0)\n\ndef euclideanDistance(point_a, point_b,dim ):\n\tdist = 0.0\n\tfor x in range(0,dim):\n\t\tdist += (point_a[x] - point_b[x])**2\n\treturn math.sqrt(dist)\n\n#d=euclideanDistance(X_train[0], X_train[1], len(X_train[0])-1)\n \n\ndef K_Neighbors(train, x, k):\n distances = []\n dim=len(x)-1\n for i in range(0,len(train)):\n dist = euclideanDistance(x, train[i], dim)\n distances.append((train[i], dist))\n distances.sort(key=operator.itemgetter(1))\n neigh = []\n for i in range(0,k):\n neigh.append(distances[i][0])\n \n return neigh\n \ndef get_prediction(neigh):\n\tclass_count = {}\n\tfor i in range(len(neigh)):\n\t\tresponse = neigh[i][-1]\n\t\tif response in class_count:\n\t\t\tclass_count[response] += 1\n\t\telse:\n\t\t\tclass_count[response] = 1\n\tclass_count_sort = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)\n\treturn 
class_count_sort[0][0]\n \ndef Accuracy(Test, predictions):\n\tcorrect = 0\n\tfor x in range(len(Test)):\n\t\tif Test[x][-1] == predictions[x]:\n\t\t\tcorrect += 1\n\treturn (correct/float(len(Test))) * 100.0\n\n\nacc=[0.0]*50\nfor k in range(1,50): \n predictions=[]\n for x in range(len(X_test)):\n kneighbors = K_Neighbors(X_train, X_test[x], k)\n result = get_prediction(kneighbors)\n predictions.append(result)\n acc[k]=Accuracy(X_test, predictions)\nprint(acc)\n\n\n# acc[0] is never filled in (k starts at 1), so plot k = 1..49 against acc[1:]\nx = np.arange(1, 50)\nplt.xlabel(\"Value of K\")\nplt.ylabel(\"Total Accuracy\")\nplt.plot(x, acc[1:])\nplt.show()\n\n\n","sub_path":"Classification Using KNN Without scikit learn/knn_diabetes.py","file_name":"knn_diabetes.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"350614463","text":"import re\nimport requests\n\n\ndef get_item_from_json(json_dict, field):\n for item in field.split('.'):\n items = list(filter(None, re.split(r'[\\[\\]]', item)))\n # materialize the map so the emptiness check below works in Python 3\n key, indexes = items[0], list(map(int, items[1:]))\n json_dict = json_dict[key]\n if indexes:\n for i in indexes:\n json_dict = json_dict[i]\n return json_dict\n\n\ndef dot2python(field):\n z = []\n for i, item in enumerate(field.split('.')):\n if i == 0:\n z.append(item)\n else:\n item_split = item.split('[')\n if item_split[0][-1] == ')': # Support for type casting \"int(v.firsts[0].second)\"\n key = '[\"{}\"])'.format(item_split[0].rstrip(')'))\n else:\n key = '[\"{}\"]'.format(item_split[0])\n z.append(key)\n if len(item_split) > 1:\n z.append(item[len(item_split[0]):])\n return ''.join(z)\n\n\ndef create_url(url, path_params, query_params):\n if path_params is not None:\n try:\n url = url.format(**path_params)\n except KeyError as e:\n msg = 'Missing field in pathParams ({})'\n raise ValueError(msg.format(e))\n if query_params is not None:\n url += '?' 
+ '&'.join(['{}={}'.format(k, query_params[k])\n for k in query_params])\n return url\n\n\ndef num_compare(a, b, operator):\n a, b = float(a), float(b)\n if operator in ['=', '==', 'eq']:\n return a == b\n elif operator in ['!=', 'ne']:\n return a != b\n elif operator in ['>', 'gt']:\n return a > b\n elif operator in ['>=', 'ge']:\n return a >= b\n elif operator in ['<', 'lt']:\n return a < b\n elif operator in ['<=', 'le']:\n return a <= b\n","sub_path":"dargus/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"203578574","text":"# xls to xml\n# coding: UTF-8\n# uses the xlrd module to operate on xls files\n# uses the xml.dom.minidom module to operate on xml files\n\nimport xlrd\nimport xml.dom.minidom as md\n\n# # used for problems 17 and 18\ndef get_xlscontent(path):\n # open the workbook\n workbook = xlrd.open_workbook(path)\n # open the sheet\n sheet = workbook.sheets()[0]\n # print(sheet.nrows)\n # fetch the contents of the sheet\n content = {}\n for i in range(sheet.nrows):\n content[i+1] = sheet.row_values(i)[1:]\n return content\n\n# used for problem 19, syntax essentially the same as above \ndef get_xlscontent_19(path):\n workbook = xlrd.open_workbook(path)\n sheet = workbook.sheets()[0]\n content = []\n for i in range(sheet.nrows):\n content.append(sheet.row_values(i))\n return content\n\n# write the content into xml\ndef write_xml(content):\n # get an xml document\n doc = md.Document()\n # create a root tag\n root = doc.createElement('root')\n # append root to the xml document\n doc.appendChild(root)\n # use a loop to add several p tags and their content\n for i in range(len(content)):\n # create a p tag\n p = doc.createElement('p')\n root.appendChild(p)\n # create a text node\n xmlcontent = doc.createTextNode(str(content[i]))\n # append the text node to the p tag\n p.appendChild(xmlcontent)\n return doc\n\ndef save_xml(doc):\n # print(doc.toprettyxml(encoding='utf-8'))\n with open('17_18_19.xml', 'wb') as f:\n f.write(doc.toprettyxml(encoding = 'utf-8'))\n\ndef main():\n path = [\n r'C:\\Users\\pp\\Documents\\Python\\show_my_code\\14_15_16\\student.xls',\n r'C:\\Users\\pp\\Documents\\Python\\show_my_code\\14_15_16\\city.xls',\n r'C:\\Users\\pp\\Documents\\Python\\show_my_code\\14_15_16\\num.xls'\n ]\n # handle problems 17/18 and problem 19 separately\n content = []\n for i in range(len(path)):\n if i < 2:\n c = get_xlscontent(path[i])\n else:\n c = get_xlscontent_19(path[i])\n content.append(c)\n doc = write_xml(content)\n save_xml(doc)\n\nif __name__ == '__main__':\n main()","sub_path":"17_18_19/17_18_19.py","file_name":"17_18_19.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"367154908","text":"from qfunction.base.base import *\nfrom numpy import sin, cos\nimport numpy as np\nfrom math import atan\n\ndef q_sin(u,q,cpx=False,israd=True):\n u = radian(u) if not israd else u\n b = 1j\n u *= 1j\n if cpx:\n return (q_exp(u,q)-q_exp(-u,q))/(2*b)\n else:\n return ((q_exp(u,q)-q_exp(-u,q))/(2*b)).real\n\n\n\ndef q_cos(u,q=1,cpx=False,israd=True):\n u = radian(u) if not israd else u\n u = u*1j\n A = lambda w: 1/(1-w)\n if q > 1.9 and u >= limit(A,q):\n return np.nan\n else:\n if cpx:\n return (q_exp(u,q)+q_exp(-u,q))/2\n else:\n return ((q_exp(u,q)+q_exp(-u,q))/2).real\n\n","sub_path":".history/qfunction/base/trigonometry_20210705114915.py","file_name":"trigonometry_20210705114915.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"307146319","text":"#!/usr/bin/env python\nimport argparse\nimport os\n\nimport cv2\nimport numpy as np\nimport rosbag\nfrom cv_bridge 
import CvBridge\n\nfrom dronet_tello.msg import FlightData\n\n# from geometry_msgs.msg import Twist\n# from sensor_msgs.msg import Image\n\nFLIGHT_DATA_ATTRIBUTES = [\n name\n for name in sorted(dir(FlightData))\n if not name.startswith(\"_\") and \"serialize\" not in name and \"header\" not in name\n]\n\n\ndef flight_data_to_csv(flight_data):\n return \",\".join(repr(getattr(flight_data, name)) for name in FLIGHT_DATA_ATTRIBUTES)\n\n\ndef velocity_to_csv(velocity):\n return \",\".join(\n [\n repr(velocity.linear.x),\n repr(velocity.linear.y),\n repr(velocity.linear.z),\n repr(velocity.angular.x),\n repr(velocity.angular.y),\n repr(velocity.angular.z),\n ]\n )\n\n\ndef _parse_args():\n parser = argparse.ArgumentParser(description=\"Bag parser for tello data\")\n parser.add_argument(\"bag_file\", type=str, help=\"The bag file to parse\")\n parser.add_argument(\n \"-d\",\n \"--directory\",\n type=str,\n default=\"./training\",\n help=\"The directory to save data to\",\n )\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n path = args.directory\n image_path = os.path.join(path, args.bag_file[:-4])\n data_filename = os.path.join(path, \"%s.txt\" % args.bag_file[:-4])\n if not os.path.exists(image_path):\n os.makedirs(image_path)\n with open(data_filename, \"w+\") as data_file:\n data_file.write(\n \"image1,image2,{0},{0},{1}\\n\".format(\n \",\".join(FLIGHT_DATA_ATTRIBUTES),\n \"linear.x,linear.y,linear.z,angular.x,angular.y,angular.z\",\n )\n )\n with rosbag.Bag(args.bag_file) as bag:\n last_2_images = []\n last_2_flight_data = []\n last_message_type = None\n started = False\n for topic, msg, time in bag.read_messages():\n assert len(last_2_images) <= 2\n assert len(last_2_flight_data) <= 2\n if topic == \"/image_raw\":\n img = CvBridge().imgmsg_to_cv2(msg, msg.encoding)\n # img = np.roll(img, 1, axis=-1)\n image_name = os.path.join(image_path, \"{}.jpg\".format(time))\n last_2_images = last_2_images[-1:] + [(image_name, img)]\n last_message_type = \"image\"\n elif topic == \"/flight_data\":\n last_2_flight_data = last_2_flight_data[-1:] + [msg]\n last_message_type = \"flight_data\"\n elif topic == \"/velocity\":\n if len(last_2_images) < 2 or len(last_2_flight_data) < 2:\n print(\"waiting for data\")\n continue\n if last_message_type == \"velocity\":\n print(\"multiple velocities for same input\")\n continue\n if (\n not started\n and msg.linear.x == 0.0\n and msg.linear.y == 0.0\n and msg.linear.z == 0.0\n and msg.angular.x == 0.0\n and msg.angular.y == 0.0\n and msg.angular.z == 0.0\n ):\n print(\"No movement... 
waiting for initial command\")\n continue\n started = True\n last_2_image_names = []\n for image_name, img in last_2_images:\n cv2.imwrite(image_name, img)\n last_2_image_names.append(image_name)\n data_file.write(\",\".join(last_2_image_names))\n data_file.write(\",\")\n data_file.write(\n \",\".join(map(flight_data_to_csv, last_2_flight_data))\n )\n data_file.write(\",\")\n data_file.write(velocity_to_csv(msg))\n data_file.write(\"\\n\")\n last_message_type = \"velocity\"\n\n\nif __name__ == \"__main__\":\n main(_parse_args())\n","sub_path":"bag_parser.py","file_name":"bag_parser.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"539104298","text":"# demonstrates various linear algebra operations involving matrices \n# with tensorflow\nimport tensorflow as tf\n \na = tf.constant([[1,2,3], [4,5,6]], name='a') # matrices\nb = tf.constant([[1,2,3], [4,5,6]], name='b')\nd = tf.constant([100,101,102], name='d')\nc = tf.constant(100, name='c') # scalar\naddOp = a + b # column + column\naddOp2 = a + c # list + scalars\naddOp3 = a + d # 1d matrix broadcasted into 2d matrix\n \nwith tf.Session() as session:\n print(\"Elementwise operation:\",session.run(addOp))\n print(\"Broadcasted operation:\",session.run(addOp2))\n print(\"Matrix broadcast:\",session.run(addOp3))\n","sub_path":"Tensorflow/matrixAlgebra.py","file_name":"matrixAlgebra.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417149177","text":"from flask import Flask\nfrom flask import request\nfrom get_data import findAllTransactions,getUserInfo,getTransaction,createTransaction,getVendorInfo\n\napp = Flask(__name__)\n\n@app.route(\"/getAllTransactions\", methods=['GET','POST'])\ndef getTransactions():\n return findAllTransactions(int(request.form.get(\"user_id\")))\n\n@app.route(\"/getOneTransaction\", methods=['GET','POST'])\ndef getOneTransaction():\n user_id = int(request.form.get(\"user_id\"))\n invoice_id = int(request.form.get(\"invoice_id\"))\n return getTransaction(user_id, invoice_id)\n\n@app.route(\"/getUserData\", methods=['GET','POST'])\ndef getUserData():\n user_id = int(request.form.get(\"user_id\"))\n return getUserInfo(user_id)\n\n@app.route(\"/createNewTransaction\", methods=['GET','POST'])\ndef newTransaction():\n date_of_transaction = request.form.get(\"date_of_transaction\")\n time_of_transaction = request.form.get(\"time_of_transaction\")\n user_id = int(request.form.get(\"user_id\"))\n vendor_id = int(request.form.get(\"vendor_id\"))\n invoice_id = int(request.form.get(\"invoice_id\"))\n # should be passed in by a one-line argument as a string list\n items_purchased = request.form.get(\"items_purchased\")\n\n return createTransaction(user_id,date_of_transaction,time_of_transaction,vendor_id,invoice_id,items_purchased)\n\n@app.route(\"/getVendorData\", methods=['GET','POST'])\ndef getVendorData():\n vendor_id = int(request.form.get(\"vendor_id\"))\n return getVendorInfo(vendor_id)\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201631044","text":"# -*- coding: utf-8 -*-\n# @project : just_to_eat\n# @file : 021_Merge_Two_Sorted_Lists.py\n# @time : 2019-09-06\n\n'''\nMerge Two Sorted Lists 
\n\nMerge two sorted linked lists into one new sorted list and return it. The new list is made by splicing together all the nodes of the two given lists. \n\nExample:\n\nInput: 1->2->4, 1->3->4\nOutput: 1->1->2->3->4->4\n'''\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n'''\nApproach 1: recursion\nIdea\n\nWe can recursively define the merge operation on the two lists as follows (ignoring edge cases such as empty lists):\n\n merge(l1, l2) = l1[0] + merge(l1[1:], l2) if l1[0] < l2[0]\n l2[0] + merge(l1, l2[1:]) otherwise\n\nThat is, the list whose head is smaller is merged with the result of merging the remaining elements.\n\nAlgorithm\n\nWe model the recursive process above directly, considering the edge cases first.\nIn particular, if l1 or l2 is null from the start, there is nothing to merge, so we simply return the non-empty list.\nOtherwise, we determine which of l1 and l2 has the smaller head, and recursively decide the next value to add to the result.\nIf both lists are empty, the process terminates, so the recursion is guaranteed to terminate.\n\n'''\n\nclass Solution:\n '''\n recursion\n '''\n def mergeTwoLists(self, l1, l2):\n if l1 is None:\n return l2\n elif l2 is None:\n return l1\n elif l1.val < l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\n'''\nApproach 2: iteration\nIdea\n\nWe can implement the algorithm above iteratively. Assuming l1 has strictly fewer elements than l2, we can insert the elements of l2 one by one into their correct positions in l1.\n\nAlgorithm\n\nFirst, we set up a sentinel node \"prehead\", which makes it easy to return the merged list at the end.\nWe maintain a prev pointer, and all we need to do is adjust its next pointer.\nThen we repeat the following until l1 or l2 points to null: if the value at l1's current position is less than or equal to l2's, we attach l1's node after prev and advance l1 by one; otherwise, we do the same with l2. Whichever node we attach, we advance prev by one element.\n\nWhen the loop terminates, at most one of l1 and l2 is non-null. Since both input lists are sorted, whichever list is non-null contains only elements larger than every element already merged.\nThis means we can simply attach the non-null list to the end of the merged list and return it.\n\n'''\n\nclass Solution:\n def mergeTwoLists(self, l1, l2):\n # maintain an unchanging reference to node ahead of the return node.\n prehead = ListNode(-1)\n\n prev = prehead\n while l1 and l2:\n if l1.val <= l2.val:\n prev.next = l1\n l1 = l1.next\n else:\n prev.next = l2\n l2 = l2.next\n prev = prev.next\n\n # exactly one of l1 and l2 can be non-null at this point, so connect\n # the non-null list to the end of the merged list.\n prev.next = l1 if l1 is not None else l2\n\n return prehead.next","sub_path":"021_Merge_Two_Sorted_Lists.py","file_name":"021_Merge_Two_Sorted_Lists.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"168143926","text":"import os\nimport tempfile\n\nfrom settings import settings\n\nfrom office365.runtime.auth.client_credential import ClientCredential\nfrom office365.sharepoint.client_context import ClientContext\n\n\ndef print_download_progress(offset):\n print(\"Downloaded '{0}' bytes...\".format(offset))\n\n\nsite_url = settings.get('url') + \"/sites/team\"\ncredentials = ClientCredential(settings.get('client_credentials').get('client_id'),\n settings.get('client_credentials').get('client_secret'))\nctx = ClientContext(site_url).with_credentials(credentials)\n\nfile_url = '/sites/team/Shared Documents/big_buck_bunny.mp4'\nsource_file = ctx.web.get_file_by_server_relative_url(file_url)\nlocal_file_name = os.path.join(tempfile.mkdtemp(), os.path.basename(file_url))\nwith open(local_file_name, \"wb\") as local_file:\n source_file.download_session(local_file, print_download_progress).execute_query()\nprint(\"[Ok] file has been downloaded: {0}\".format(local_file_name))\n","sub_path":"examples/sharepoint/files/download_file_large.py","file_name":"download_file_large.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"313924767","text":"from flask import Flask\nfrom flask_mail import Mail\n\napp = Flask(__name__)\nmail_settings = {\n \"MAIL_SERVER\": 'smtp.gmail.com',\n \"MAIL_PORT\": 465,\n \"MAIL_USE_TLS\": False,\n \"MAIL_USE_SSL\": True,\n \"MAIL_USERNAME\": 'ae1276871@gmail.com',\n \"MAIL_PASSWORD\": 
'rdouuhrmnpwmrwhn'\n}\napp.config.update(mail_settings)\nmail = Mail(app)\napp.secret_key = 'xxxyyyzzz'\n# login_manager = LoginManager(app)\n# login_manager.login_view = 'auth.login_get'\n\nfrom controller.auth import auth\nfrom controller.main import blue\nfrom controller.family import family\n\napp.register_blueprint(blue)\napp.register_blueprint(auth)\napp.register_blueprint(family)\n\n\n@app.route('/')\ndef home_page():\n return 'Home Page!'\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65441380","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, _, api, exceptions, fields, SUPERUSER_ID\nfrom odoo.addons.point_of_sale.models.pos_config import PosConfig\nimport logging\nfrom odoo.exceptions import ValidationError\n\nlogger = logging.getLogger(__name__)\n\ndef update_wrong_surcharge_journal_in_configs(cr, registry):\n env = api.Environment(cr, SUPERUSER_ID, {})\n \n pos_configs = env['pos.config'].search([])\n for config in pos_configs:\n logger.warning(\"Changing wrong surcharge journal in %s[%i] pos config\" % (config.name, config.id))\n if config.surcharge_journal_id.company_id != config.company_id:\n # We use SQL just because we want to avoid the exception that is raised \n # when trying to change a config with a opened session\n cr.execute(\"\"\"UPDATE pos_config SET surcharge_journal_id = %s WHERE id = %s\"\"\", (config.journal_id.id, config.id))\n\n\nclass PosConfigInherit(models.Model):\n \"\"\" Adds constraints \"\"\"\n _inherit = \"pos.config\"\n\n is_pos_pr_discount = fields.Boolean(default=False)\n\n def _default_sale_surcharge(self):\n company = self.company_id or self.env.company\n return self.env['account.journal'].search(\n [('type', '=', 'sale'),\n ('company_id', '=', company.id),\n ('code', '=', 'POSS')], limit=1)\n\n \n surcharge_journal_id = fields.Many2one(\n 'account.journal', string='Surcharge Journal',\n domain=[('type', '=', 'sale')],\n help=\"Surcharge journal used to post surcharge entries.\",\n default=_default_sale_surcharge,\n ondelete='restrict')\n\n @api.constrains('payment_method_ids')\n def check_if_there_is_discount_payment_method(self):\n for pos_config_id in self:\n discount_payment_method = pos_config_id.payment_method_ids.filtered(lambda payment_method: payment_method.is_pos_pr_discount)\n if discount_payment_method:\n raise exceptions.ValidationError(_(\"%s is only for aesthetic use\") % discount_payment_method.name)\n\n def _check_surcharge_journal(self):\n self.ensure_one()\n if not self.sudo().surcharge_journal_id:\n raise ValidationError(\"You should use a surcharge journal\")\n \n if self.sudo().surcharge_journal_id.company_id != self.company_id:\n raise ValidationError(\"You should use a surcharge journal\")\n \n def _check_surcharge_and_discount(self):\n self.ensure_one()\n if not self.env['pos.payment.method'].search([('company_id', '=', self.company_id.id)]):\n pass\n\n def open_session_cb(self):\n self._check_surcharge_journal()\n self._check_surcharge_and_discount()\n return super().open_session_cb()\n","sub_path":"pos_pr/models/pos_config.py","file_name":"pos_config.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"147790741","text":"\"\"\"\nCopyright (c) 2012 Shotgun Software, Inc\n----------------------------------------------------\n\nMethods 
for handling of the tank command\n\n\"\"\"\n\nfrom ...errors import TankError\n\nfrom .. import setup_project\nfrom .. import validate_config\nfrom .. import core_api_admin\n\nfrom .action_base import Action\n\nimport os\n\n \nclass SetupProjectAction(Action):\n \n def __init__(self):\n Action.__init__(self, \n \"setup_project\", \n Action.GLOBAL, \n \"Sets up a new project with Tank.\", \n \"Configuration\")\n \n def run(self, log, args):\n if len(args) != 0:\n raise TankError(\"This command takes no arguments!\")\n setup_project.interactive_setup(log, self.code_install_root)\n \n \nclass CoreUpgradeAction(Action):\n \n def __init__(self):\n Action.__init__(self, \n \"core\", \n Action.GLOBAL, \n \"Checks that your Tank Core API install is up to date.\", \n \"Configuration\")\n \n def run(self, log, args):\n if len(args) != 0:\n raise TankError(\"This command takes no arguments!\")\n if self.code_install_root != self.pipeline_config_root:\n # we are updating a parent install that is shared\n log.info(\"\")\n log.warning(\"You are potentially about to update the Core API for multiple projects.\")\n log.info(\"\")\n core_api_admin.interactive_update(log, self.code_install_root)\n \n\nclass CoreLocalizeAction(Action):\n \n def __init__(self):\n Action.__init__(self, \n \"localize\", \n Action.PC_LOCAL, \n (\"Installs the Core API into your current Configuration. This is typically \"\n \"done when you want to test a Core API upgrade in an isolated way. If you \"\n \"want to safely test an API upgrade, first clone your production configuration, \"\n \"then run the localize command from your clone's tank command.\"), \n \"Admin\")\n \n def run(self, log, args):\n if len(args) != 0:\n raise TankError(\"This command takes no arguments!\")\n core_api_admin.install_local_core(log, self.code_install_root, self.pipeline_config_root)\n\n\n\nclass ValidateConfigAction(Action):\n \n def __init__(self):\n Action.__init__(self, \n \"validate\", \n Action.PC_LOCAL, \n (\"Validates your current Configuration to check that all \"\n \"environments have been correctly configured.\"), \n \"Configuration\")\n \n def run(self, log, args):\n if len(args) != 0:\n raise TankError(\"This command takes no arguments!\")\n validate_config.validate_configuration(log, self.tk)\n\n\nclass ClearCacheAction(Action):\n \n def __init__(self):\n Action.__init__(self, \n \"clear_cache\", \n Action.PC_LOCAL, \n (\"Clears the Shotgun Menu Cache associated with this Configuration. 
\"\n \"This is sometimes useful after complex configuration changes if new \"\n \"or modified Tank menu items are not appearing inside Shotgun.\"), \n \"Admin\")\n \n def run(self, log, args):\n if len(args) != 0:\n raise TankError(\"This command takes no arguments!\")\n \n cache_folder = self.tk.pipeline_configuration.get_cache_location()\n # cache files are on the form shotgun_mac_project.txt\n for f in os.listdir(cache_folder):\n if f.startswith(\"shotgun\") and f.endswith(\".txt\"):\n full_path = os.path.join(cache_folder, f)\n log.debug(\"Deleting cache file %s...\" % full_path)\n try:\n os.remove(full_path)\n except:\n log.warning(\"Could not delete cache file '%s'!\" % full_path)\n \n log.info(\"The Shotgun menu cache has been cleared.\")\n \n","sub_path":"studio/install/core/python/tank/deploy/tank_commands/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"531197298","text":"# Array data structure implementation\n\n# class RingBuffer:\n# def __init__(self, capacity):\n# self.buff = []\n# self.capacity = capacity\n# self.size = 0\n# self.pointer = 0\n\n# def append(self, item):\n# if self.size < self.capacity:\n# self.buff.append(item)\n# self.size += 1\n# else:\n# if self.pointer >= self.capacity:\n# self.pointer = 0\n# self.buff.pop(self.pointer)\n# self.buff.insert(self.pointer, item)\n# self.pointer +=1\n\n# def get(self):\n# return [i for i in self.buff if i is not None]\n\n\n# Singly Linked List data structure implementation\n\nclass Node:\n def __init__(self, value, next_node=None, prev_node=None):\n self.value = value\n self.next_node = next_node\n\n\nclass RingBuffer:\n def __init__(self, capacity):\n self.capacity = capacity\n self.head = None\n self.tail = None\n self.size = 0\n self.pointer = self.head\n \n\n def append(self, item):\n new_node = Node(item)\n # if storage current size less then capacity\n if self.size < self.capacity:\n if self.head is None or self.tail is None:\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next_node = new_node\n self.tail = new_node\n self.size += 1\n self.pointer = new_node\n # if capacity equals or greater than size\n else:\n if self.pointer == self.tail:\n self.pointer = self.head\n self.tail = new_node\n new_node.next_node = self.pointer\n if self.pointer == self.head:\n self.pointer = self.head.next_node\n self.head = new_node\n new_node.next_node = self.pointer\n else:\n # print(\"current pointer\", self.pointer.value)\n if self.pointer.next_node == None:\n old_pointer_next = self.head\n else:\n old_pointer_next = self.pointer.next_node\n \n self.pointer.value = new_node.value\n self.pointer = old_pointer_next\n \n # print(\"next pointer\", self.pointer.value)\n # print(\"new node\", new_node.value)\n\n \n def get(self):\n l = []\n curr = self.head\n while curr:\n l.append(curr.value)\n curr = curr.next_node\n return l\n\n\nr = 
RingBuffer(6)\nr.append('a')\nprint(r.get())\nr.append('b')\nprint(r.get())\nr.append('c')\nprint(r.get())\nr.append('d')\nprint(r.get())\nr.append('e')\nprint(r.get())\nr.append('1')\nprint(r.get())\nr.append('2')\nprint(r.get())\nr.append('3')\nprint(r.get())\nr.append('4')\nprint(r.get())\nr.append('5')\nprint(r.get())\nr.append('6')\nprint(r.get())\nr.append('7')\nprint(r.get())\nr.append('8')\nprint(r.get())\nr.append('9')\nprint(r.get())\n","sub_path":"ring_buffer/ring_buffer.py","file_name":"ring_buffer.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"309000645","text":"import streamlit as st\nimport requests\n\nst.title('Form example: What is your favourite film?')\n\n# fastapi endpoint\nurl = 'http://fastapi:8000'\nendpoint = '/postfilm'\n\noption = st.selectbox(\n 'How would you like to be contacted to speak more about your favourite film?',\n ('Email', 'Home phone', 'Mobile phone'))\n\ntitle = st.text_input('Film title', 'Life of Brian')\n\n\ndef process(title, option, server_url: str):\n\n r = requests.post(server_url, json={\"title\": title, \"option\": option})\n\n return r\n\n\nif st.button('Submit'):\n\n if not title:\n st.write(\"Insert a title!\") # handle case with no title\n else:\n result = process(title, option, url + endpoint)\n if result.status_code == 200:\n st.write(result)\n jsonDecoded = result.json()\n st.write(\"Your favourite film is \" + jsonDecoded[\"YourTitle\"])\n st.write(\"You would like to be contacted by \" + jsonDecoded[\"YourOption\"])\n\n","sub_path":"streamlit/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"233421074","text":"# game_map directions:\nTOP_RIGHT = 0\nTOP_LEFT = 1\n\n# unit types. 
the strings will be overloaded with rich object data once algo starts:\nFILTER = \"FF\"\nENCRYPTOR = \"EF\"\nDESTRUCTOR = \"DF\"\nPING = \"PI\"\nEMP = \"EI\"\nSCRAMBLER = \"SI\"\nUNIT_TYPES = {\n FILTER: None,\n ENCRYPTOR: None,\n DESTRUCTOR: None,\n PING: None,\n EMP: None,\n SCRAMBLER: None,\n}\n\n# standard strategy's EMP attack status\nUNLOADED = 0\nREADY_TO_FIRE = 1\n\n# map locations\nENEMY_LOCATIONS = []\nfor row in range(14):\n y_index = 27 - row\n x_lower_bound = 13 - row\n x_upper_bound = 14 + row\n for x_index in range(x_lower_bound, x_upper_bound + 1):\n ENEMY_LOCATIONS.append([x_index, y_index])\n\nPLAYER_LOCATIONS = []\nfor row in range(14):\n y_index = row\n x_lower_bound = 13 - row\n x_upper_bound = 14 + row\n for x_index in range(x_lower_bound, x_upper_bound + 1):\n PLAYER_LOCATIONS.append([x_index, y_index])\n","sub_path":"strategies/ping-spam/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"513930588","text":"from rest_framework.viewsets import ReadOnlyModelViewSet, ViewSet\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.generics import RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.request import Request\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom datetime import datetime, timedelta\nimport pytz\nfrom shattuckite.commonapi import CommonPageNumberPagination\nfrom django.utils.timezone import now\n\nfrom .serializer import SensorDataSerializer, SensorSerializer\nfrom rest_framework.decorators import action\nfrom .models import SensorData, Sensor\n\n\nclass SensorViewSet(ReadOnlyModelViewSet):\n serializer_class = SensorSerializer\n queryset = Sensor.objects.all()\n pagination_class = CommonPageNumberPagination\n\n @action(methods=[\"POST\"], detail=True)\n def threshold(self, req: Request, pk):\n data = req.data\n threshold = data.get(\"threshold\", None)\n try:\n if threshold is not None:\n threshold = float(threshold)\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n SensorObj = get_object_or_404(Sensor, pk=pk) # type:Sensor\n SensorObj.Threshold = threshold\n SensorObj.save()\n return Response(status=status.HTTP_200_OK)\n\n\nclass SensorDataViewSet(ViewSet):\n\n @action(methods=['GET'], detail=False)\n def range(self, request: Request):\n try:\n data = request.query_params\n bTimeStr = data['begin']\n eTimeStr = data['end']\n sensorID = data['sensorID']\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n beginTime = datetime.fromtimestamp(float(bTimeStr), tz=pytz.timezone(\"Asia/Shanghai\"))\n endTime = datetime.fromtimestamp(float(eTimeStr), tz=pytz.timezone(\"Asia/Shanghai\"))\n\n data = SensorData.objects.all().filter(Sensor__SensorID=sensorID).filter(Datetime__gte=beginTime).filter(\n Datetime__lte=endTime)\n\n serObj = SensorDataSerializer(data, many=True, read_only=True)\n return Response(data=serObj.data)\n\n @action(methods=['GET'], detail=False)\n def realTimeData(self, request: Request):\n\n try:\n data = request.query_params\n sensorID = str(data['sensorID'])\n sec = int(data.get(\"seconds\", 30))\n\n except:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n data = SensorData.objects.all().filter(Sensor__SensorID=sensorID).filter(\n Datetime__gte=now() - timedelta(seconds=sec)).filter(\n Datetime__lte=now())\n\n serObj = SensorDataSerializer(data, many=True, read_only=True)\n 
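# The two Datetime filters above implement a sliding window: only SensorData\n # rows stamped within the last 'sec' seconds (default 30) survive the query,\n # so this endpoint always returns the freshest readings for the sensor.\n 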
return Response(data=serObj.data)\n","sub_path":"src/Sensor/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"584136987","text":"##############################################################################\n#\n# Copyright (c) 2003 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"A few common items that don't fit elsewhere, it seems.\n\nClasses:\n- Error -- an exception\n\nFunctions:\n- getoriginal(path)\n- getextra(path)\n- getannotations(path)\n- getspecial(path, what)\n- split(path)\n- ensuredir(dir)\n\nVariables:\n- unwanted -- the tuple (\"\", os.curdir, os.pardir)\n- nczope -- the string os.path.normcase(\"@@Zope\")\n\n$Id$\n\"\"\"\n\nimport os\nimport sys\nimport unicodedata\n\nclass Error(Exception):\n \"\"\"User-level error, e.g. non-existent file.\n\n This can be used in several ways:\n\n 1) raise Error(\"message\")\n 2) raise Error(\"message %r %r\" % (arg1, arg2))\n 3) raise Error(\"message %r %r\", arg1, arg2)\n 4) raise Error(\"message\", arg1, arg2)\n\n - Forms 2-4 are equivalent.\n\n - Form 4 assumes that \"message\" contains no % characters.\n\n - When using forms 2 and 3, all % formats are supported.\n\n - Form 2 has the disadvantage that when you specify a single\n argument that happens to be a tuple, it may get misinterpreted.\n\n - The message argument is required.\n\n - Any number of arguments after that is allowed.\n \"\"\"\n\n def __init__(self, msg, *args):\n self.msg = msg\n self.args = args\n\n def __str__(self):\n msg, args = self.msg, self.args\n if args:\n if \"%\" in msg:\n msg = msg % args\n else:\n msg += \" \"\n msg += \" \".join(map(repr, args))\n return str(msg)\n\n def __repr__(self):\n return \"%s%r\" % (self.__class__.__name__, (self.msg,)+self.args)\n\nunwanted = (\"\", os.curdir, os.pardir)\n\nnczope = os.path.normcase(\"@@Zope\")\n\ndef getoriginal(path):\n \"\"\"Return the path of the Original file corresponding to path.\"\"\"\n return getspecial(path, \"Original\")\n\ndef getextra(path):\n \"\"\"Return the path of the Extra directory corresponding to path.\"\"\"\n return getspecial(path, \"Extra\")\n\ndef getannotations(path):\n \"\"\"Return the path of the Annotations directory corresponding to path.\"\"\"\n return getspecial(path, \"Annotations\")\n\ndef getspecial(path, what):\n \"\"\"Helper for getoriginal(), getextra(), getannotations().\"\"\"\n head, tail = os.path.split(path)\n return os.path.join(head, \"@@Zope\", what, tail)\n\ndef split(path):\n \"\"\"Split a path, making sure that the tail returned is real.\"\"\"\n head, tail = os.path.split(path)\n if tail in unwanted:\n newpath = os.path.normpath(path)\n head, tail = os.path.split(newpath)\n if tail in unwanted:\n newpath = os.path.realpath(path)\n head, tail = os.path.split(newpath)\n if head == newpath or tail in unwanted:\n raise Error(\"path '%s' is the filesystem root\", path)\n if not head:\n head = os.curdir\n return head, tail\n\ndef 
ensuredir(path):\n \"\"\"Make sure that the given path is a directory, creating it if necessary.\n\n This may raise OSError if the creation operation fails.\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\ndef normalize(name):\n \"\"\"Normalize a filename to normalization form C.\n \n Linux and (most?) other Unix-like operating systems use the normalization\n form C (NFC) for UTF-8 encoding by default but do not enforce this.\n Darwin, the base of Macintosh OSX, enforces normalization form D (NFD),\n where a few characters are encoded in a different way.\n \"\"\"\n if sys.platform == 'darwin':\n if isinstance(name, unicode):\n name = unicodedata.normalize(\"NFC\", name)\n elif sys.getfilesystemencoding() == 'utf-8':\n name = unicode(name, encoding='utf-8')\n name = unicodedata.normalize(\"NFC\", name)\n name = name.encode('utf-8')\n return name\n \ndef encode(path, encoding=None):\n \"\"\"Encodes a path in its normalized form.\n \n Uses the filesystem encoding as a default encoding. Assumes that the given path\n is also encoded in the filesystem encoding.\n \"\"\"\n fsencoding = sys.getfilesystemencoding()\n if encoding is None:\n encoding = fsencoding\n if isinstance(path, unicode):\n return path.encode(encoding)\n return unicode(path, encoding=fsencoding).encode(encoding)\n \n","sub_path":"zope.fssync/tags/3.5.2/src/zope/fssync/fsutil.py","file_name":"fsutil.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198926119","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 24 19:51:55 2020\n\n@author: noahk\n\"\"\"\n\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport aiohttp\nimport asyncio\nimport nest_asyncio\nfrom understat import Understat\nfrom fuzzywuzzy import fuzz,process\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nnest_asyncio.apply()\n\nasync def all_players():\n async with aiohttp.ClientSession() as session:\n understat = Understat(session)\n data = await understat.get_league_players(\"epl\", 2019)\n json_ = json.loads((json.dumps(data)))\n return json_\n\ndef normalize_to_df(json_data):\n df = pd.io.json.json_normalize(json_data)\n df = df.drop(['id', 'position'], axis = 1)\n return df\n\ndef connect_fpl_api():\n url = 'https://fantasy.premierleague.com/api/bootstrap-static/'\n r = requests.get(url)\n json = r.json()\n json.keys()\n \n elements_df = pd.DataFrame(json['elements'])\n elements_types_df = pd.DataFrame(json['element_types'])\n teams_df = pd.DataFrame(json['teams'])\n \n elements_df['position'] = elements_df.element_type.map(elements_types_df.set_index('id').singular_name)\n elements_df['team'] = elements_df.team.map(teams_df.set_index('id').name)\n elements_df['name'] = elements_df['first_name'] + ' ' + elements_df['second_name']\n \n final_df = elements_df[['name','team', 'position','total_points', 'selected_by_percent', 'now_cost',\\\n 'minutes', 'transfers_in', 'value_season','goals_scored','assists','clean_sheets',\\\n 'creativity','creativity_rank','threat','threat_rank','influence','influence_rank','ict_index',\\\n 'ict_index_rank','element_type','penalties_missed','points_per_game',\\\n 'bonus','bps']]\n final_df['name'] = final_df['name'].astype(str)\n final_df['team'] = final_df['team'].astype(str)\n final_df['creativity'] = final_df['creativity'].astype(float)\n final_df['threat'] = final_df['threat'].astype(float)\n final_df['ict_index'] = final_df['ict_index'].astype(float)\n 
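# These astype() casts are needed because the FPL API returns many numeric\n # stats as strings; value_season must be numeric before the sort and the\n # per-minute value computed below.\n 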
final_df['points_per_game'] = final_df['points_per_game'].astype(float)\n final_df['influence'] = final_df['influence'].astype(float)\n final_df['value_season'] = final_df['value_season'].astype(float)\n final_df = final_df.sort_values(by=['value_season'], ascending=False).reset_index(drop=True)\n final_df['value_minutes'] = (final_df['value_season']/final_df['minutes'])*100\n return final_df\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n json_all_players = loop.run_until_complete(all_players())\n player_xStats = normalize_to_df(json_all_players)\n \n fpl_data = connect_fpl_api()\n value_players = fpl_data.sort_values(by=['value_minutes'], ascending=False)\n value_players = value_players.where(value_players['value_season'] > 10.0).dropna().reset_index(drop=True)\n \n fpl_gk = fpl_data[fpl_data['position'] == 'Goalkeeper'].reset_index(drop=True)\n fpl_def = fpl_data[fpl_data['position'] == 'Defender'].reset_index(drop=True)\n fpl_mid = fpl_data[fpl_data['position'] == 'Midfielder'].reset_index(drop=True)\n fpl_fwd = fpl_data[fpl_data['position'] == 'Forward'].reset_index(drop=True)\n\nfor i in range(0,len(fpl_data)):\n if fpl_data['team'][i] == 'Man Utd':\n fpl_data['team'][i] = 'Manchester United'\n elif fpl_data['team'][i] == 'Man City':\n fpl_data['team'][i] = 'Manchester City'\n elif fpl_data['team'][i] == 'Newcastle':\n fpl_data['team'][i] = 'Newcastle United'\n elif fpl_data['team'][i] == 'Sheffield Utd':\n fpl_data['team'][i] = 'Sheffield United'\n elif fpl_data['team'][i] == 'Spurs':\n fpl_data['team'][i] = 'Tottenham'\n elif fpl_data['team'][i] == 'Wolves':\n fpl_data['team'][i] = 'Wolverhampton Wanderers'\n elif fpl_data['name'][i] == 'Danny Rose':\n fpl_data['team'][i] = 'Newcastle United,Tottenham'\n elif fpl_data['name'][i] == 'Daniel Drinkwater':\n fpl_data['team'][i] = 'Aston Villa,Burnley'\n elif fpl_data['name'][i] == 'Tariq Lamptey':\n fpl_data['team'][i] = 'Brighton,Chelsea'\n elif fpl_data['name'][i] == 'Cenk Tosun':\n fpl_data['team'][i] = 'Crystal Palace,Everton'\n elif fpl_data['name'][i] == 'Ryan Bennett':\n fpl_data['team'][i] = 'Leicester,Wolverhampton Wanderers'\n elif fpl_data['name'][i] == 'Kyle Walker-Peters':\n fpl_data['team'][i] = 'Southampton,Tottenham'\n else:\n continue\n \ndef sum_ratio(x,y):\n return (fuzz.ratio(x,y)+fuzz.partial_ratio(x,y)+fuzz.token_sort_ratio(x,y)+fuzz.token_set_ratio(x,y))\n\ncols = player_xStats.columns.tolist()\ntst = pd.DataFrame(columns = cols)\nfor i in range(0,len(fpl_data)):\n for j in range(0,len(player_xStats)):\n if (sum_ratio(fpl_data['name'][i],player_xStats['player_name'][j])>=275)&(fpl_data['team'][i] == player_xStats['team_title'][j]):\n player_xStats['fpl_name'] = fpl_data.iloc[i,0]\n player_xStats['ratio_score'] = sum_ratio(fpl_data['name'][i],player_xStats['player_name'][j])\n tst = tst.append(pd.DataFrame(player_xStats.iloc[[j]]),ignore_index = True)\n else:\n continue\nratio_check = tst[['fpl_name','player_name','ratio_score']]\nmerge_data = tst.drop(index = [185,116,277,104,220,321,228])\ninitial_merge = fpl_data.merge(merge_data, left_on = 'name', right_on = 'fpl_name', how = 'inner')\ninitial_merge = initial_merge.drop(columns = ['assists_y','fpl_name','ratio_score','team_title','goals','time'])\ninitial_merge = initial_merge.reset_index(drop = True)\nfor i in range(27,38):\n initial_merge.iloc[:,i] = initial_merge.iloc[:,i].astype(float)\n \nfig, ax = plt.subplots(figsize=(20,20)) # Sample figsize in inches\nsns.heatmap(initial_merge.corr(), annot=True, linewidths=.5, 
ax=ax)\n\n\n","sub_path":"FPL_testing.py","file_name":"FPL_testing.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"529785936","text":"# Malaria cell classifier\r\n\r\n\r\n#importing the libraries\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport cv2 as cv\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n# Defining variables\r\n\r\nDATADIR = \"./cell_images/\"\r\nCATEGORIES = [\"Uninfected\", \"Parasitized\"]\r\nIMG_SIZE = 50\r\ntraining_data = []\r\n\r\n\r\n# Creating the data set from images \r\n\r\ndef create_training_data():\r\n for category in CATEGORIES:\r\n path = os.path.join(DATADIR, category)\r\n class_num = CATEGORIES.index(category)\r\n for img in os.listdir(path):\r\n try:\r\n img_arr = cv.imread(os.path.join(path, img), cv.IMREAD_GRAYSCALE)\r\n new_img_arr = cv.resize(img_arr, (IMG_SIZE, IMG_SIZE))\r\n training_data.append([new_img_arr, class_num])\r\n except Exception as e:\r\n print(e)\r\n\r\ncreate_training_data()\r\n\r\n# Shuffle the data\r\n\r\nrandom.shuffle(training_data)\r\n\r\n# seperate Features and Labels and convert it to NumPy Array so that we can run it to tensorflow\r\n\r\nX = []\r\ny = []\r\n\r\nfor features, label in training_data:\r\n X.append(features)\r\n y.append(label)\r\n \r\nX = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)\r\ny = np.array(y)\r\n\r\n#normalize the array/image\r\nX = X / 255.0\r\n\r\n# check the shuffled data\r\nprint(y[0:10])\r\n\r\n# saving the dataset\r\n\r\nnp.save(\"malaria_X_normalized\", X)\r\nnp.save(\"malaria_y_normalized\", y)\r\n\r\n\r\n","sub_path":"Malaria Cell Classifier/models/Cell_classifier_dataset_extraction.py","file_name":"Cell_classifier_dataset_extraction.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"606578266","text":"from __future__ import print_function\n\nimport ctypes\nimport gc\nimport sys\nimport unittest\n\nfrom secureconfig import SecureString\n\n\ndef get_from_mem(ctypes_tuple):\n location = ctypes_tuple[0]\n size = ctypes_tuple[1]\n return ctypes.string_at(location, size)\n\n\ndef learn_mem(item):\n location = id(item)\n size = sys.getsizeof(item)\n return (location, size)\n\n\nclass TestSecureString(unittest.TestCase):\n def test_SecureString_has_str_methods(self):\n secret = SecureString(\"test\")\n str_methods = set(dir(str))\n ss_methods = set(dir(secret))\n self.assertEqual([s for s in str_methods if s not in ss_methods], [])\n\n def test_str_methods_still_work(self):\n # not going to test every single one. 
a smattering will do.\n secret = SecureString(\"more than just a dream\")\n self.assertFalse(secret.isupper())\n self.assertTrue(secret.islower())\n self.assertTrue(secret.startswith(\"more\"))\n self.assertEqual(secret.find(\"than\"), 5)\n\n def test_burn_method_zeroes__string(self):\n ss = SecureString(\"of all the things I've lost, I miss my mind the most\")\n ctuple = learn_mem(ss._string)\n ss.burn()\n gc.collect()\n result = get_from_mem(ctuple)\n self.assertNotIn(b\"things\", result)\n self.assertNotIn(b\"lost\", result)\n\n def test_SecureString_zeroes_on_del(self):\n ss = SecureString(\"it's a secret23 to everybody\")\n ctuple = learn_mem(ss._string)\n del ss\n gc.collect()\n result = get_from_mem(ctuple)\n with self.assertRaises(UnboundLocalError):\n print(ss) # noqa\n\n self.assertNotIn(b\"23\", result)\n self.assertNotIn(b\"everybody\", result)\n self.assertNotIn(b\"it's\", result)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_securestring.py","file_name":"test_securestring.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"200092863","text":"#Programming for the Puzzled -- Srini Devadas\n#You Can Read Minds (With a Little Calibration)\n#Five random cards are chosen and one of them is hidden.\n#Given four cards in a particular order, you can figure out what the fifth card is!\n\n#Deck is are a list of strings, each string is a card\n#The order of cards in the list matters.\ndeck = ['A_C', '2_C', '3_C', '4_C', '5_C', '6_C', '7_C', '8_C', '9_C', '10_C', 'J_C', 'Q_C',\n 'K_C', 'A_D', '2_D', '3_D', '4_D', '5_D', '6_D', '7_D', '8_D', '9_D', '10_D', 'J_D',\n 'Q_D', 'K_D', 'A_H', '2_H', '3_H', '4_H', '5_H', '6_H', '7_H', '8_H', '9_H', '10_H',\n 'J_H', 'Q_H', 'K_H', 'A_S', '2_S', '3_S', '4_S', '5_S',\n '6_S', '7_S', '8_S', '9_S', '10_S', 'J_S', 'Q_S', 'K_S']\n\n#This procedure figures out which card should be hidden based on the distance\n#between the two cards that have the same suit.\n#It returns the hidden card, the first exposed card, and the distance\ndef outputFirstCard(numbers, oneTwo, cards):\n\n\n encode = (numbers[oneTwo[0]] - numbers[oneTwo[1]]) % 13\n if encode > 0 and encode <= 6:\n hidden = oneTwo[0]\n other = oneTwo[1]\n else:\n hidden = oneTwo[1]\n other = oneTwo[0]\n encode = (numbers[oneTwo[1]] - numbers[oneTwo[0]]) % 13\n\n## #The following print statement is just for debugging!\n## print ('Hidden card is:', cards[hidden], 'and need to encode', encode)\n\n return hidden, other, encode\n\n\n#This procedure orders three cards depending on the number \"code\" that\n#needs to be encoded. 
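\n# For example, with the three remaining cards sorted ascending as (c1, c2, c3),\n# the 3! = 6 orderings encode the values 1 to 6: code 1 -> (c1, c2, c3),\n# code 2 -> (c1, c3, c2), ..., code 6 -> (c3, c2, c1), matching the if/elif chain below.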
\n#This part should be changed in ex3\ndef outputNext3Cards(code, ind):\n \n if code == 1:\n second, third, fourth = ind[0], ind[1], ind[2]\n elif code == 2:\n second, third, fourth = ind[0], ind[2], ind[1]\n elif code == 3:\n second, third, fourth = ind[1], ind[0], ind[2] \n elif code == 4:\n second, third, fourth = ind[1], ind[2], ind[0]\n elif code == 5:\n second, third, fourth = ind[2], ind[0], ind[1]\n else:\n second, third, fourth = ind[2], ind[1], ind[0]\n\n print ('Second card is:', deck[second])\n print ('Third card is:', deck[third])\n print ('Fourth card is:', deck[fourth])\n\n \n#Sorts elements in tlist in ascending order.\ndef sortList(tlist):\n for index in range(0, len(tlist)-1):\n ismall = index\n for i in range(index, len(tlist)):\n if tlist[ismall] > tlist[i]:\n ismall = i\n tlist[index], tlist[ismall] = tlist[ismall], tlist[index]\n \n return\n\n#This procedure is similar to AssistantOrdersCards() except it\n#takes in a large number and \"randomly\" generates five cards.\ndef ComputerAssistant():\n\n print ('Cards are character strings as shown below.')\n print ('Ordering is:', deck)\n cards, cind, cardsuits, cnumbers = [], [], [], []\n numsuits = [0, 0, 0, 0]\n number = 0\n while number < 100000:\n number = int(input('Please give random number' +\n ' of at least 6 digits:'))\n\n# #Generating 5 pairwise-distinct indices\n inum = 0\n clist = []\n while len(clist) < 5:\n number = number * (inum + 1) // (inum + 2)\n n = number % 52\n inum += 1\n if n not in clist:\n clist.append(n)\n\n# #Generating the list of pairsuits\n pairsuitlist = []\n for i in range(5):\n n = clist[i]\n cards.append(deck[n])\n cind.append(n)\n cardsuits.append(n // 13)\n cnumbers.append(n % 13)\n numsuits[n // 13] += 1\n if numsuits[n // 13] > 1 and (not (n // 13) in pairsuitlist):\n pairsuitlist.append(n // 13)\n \n## #Just for debugging\n print (cards)\n\n encode_min = 7\n hidden_final, other_final = 0, 0\n for pairsuit in pairsuitlist:\n cardh = []\n for i in range(5):\n if cardsuits[i] == pairsuit:\n cardh.append(i)\n\n hidden, other, encode = outputFirstCard(cnumbers, cardh, cards)\n if encode < encode_min:\n encode_min = encode\n hidden_final = hidden\n other_final = other\n print ('First card is:', cards[other_final])\n \n remindices = []\n for i in range(5):\n if i != hidden_final and i != other_final:\n remindices.append(cind[i])\n\n sortList(remindices)\n outputNext3Cards(encode_min, remindices)\n\n guess = input('What is the hidden card?')\n if guess == cards[hidden_final]:\n print ('You are a Mind Reader Extraordinaire!')\n else:\n print ('Sorry, not impressed!')\n\n return\n\nComputerAssistant()\n","sub_path":"Puzzle3/magic_ex3.py","file_name":"magic_ex3.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"107300103","text":"# Rename from closest.\nimport maya.cmds as cmds\nimport maya.OpenMaya as OpenMaya\n\n__author__ = \"Thomas Mansencal\"\n__copyright__ = \"Copyright (C) 2010 - 2012 - Thomas Mansencal\"\n__license__ = \"GPL V3.0 - http://www.gnu.org/licenses/\"\n__maintainer__ = \"Thomas Mansencal\"\n__email__ = \"thomas.mansencal@gmail.com\"\n__status__ = \"Production\"\n\n__all__ = 
[\"stacksHandler\",\n\t\t\t\"getMPoint\",\n\t\t\t\"norme\",\n\t\t\t\"renameTargetsFromClosestSources\",\n\t\t\t\"pickSources_button_OnClicked\",\n\t\t\t\"pickTargets_button_OnClicked\",\n\t\t\t\"renameFromClosest_button_OnClicked\",\n\t\t\t\"renameFromClosest_window\",\n\t\t\t\"renameFromClosest\",\n\t\t\t\"IRenameFromClosest\"]\n\ndef stacksHandler(object):\n\t\"\"\"\n\tThis decorator is used to handle various Maya stacks.\n\n\t:param object: Python object. ( Object )\n\t:return: Python function. ( Function )\n\t\"\"\"\n\n\tdef stacksHandlerCall(*args, **kwargs):\n\t\t\"\"\"\n\t\tThis decorator is used to handle various Maya stacks.\n\n\t\t:return: Python object. ( Python )\n\t\t\"\"\"\n\n\t\tcmds.undoInfo(openChunk=True)\n\t\tvalue = object(*args, **kwargs)\n\t\tcmds.undoInfo(closeChunk=True)\n\t\t# Maya produces a weird command error if not wrapped here.\n\t\ttry:\n\t\t\tcmds.repeatLast(addCommand=\"python(\\\"import %s; %s.%s()\\\")\" % (__name__, __name__, object.__name__), addCommandLabel=object.__name__)\n\t\texcept:\n\t\t\tpass\n\t\treturn value\n\n\treturn stacksHandlerCall\n\ndef getMPoint(point):\n\t\"\"\"\n\tThis definition returns an MPoint.\n\n\t:param point: Point. ( List )\n\t:return: MPoint ( MVector )\n\t\"\"\"\n\n\treturn OpenMaya.MPoint(point[0], point[1], point[2])\n\ndef norme(pointA, pointB):\n\t\"\"\"\n\tThis definition returns the norme of a vector.\n\n\t:param pointA: Point A. ( List )\n\t:param pointB: Point B. ( List )\n\t:return: Norme ( Float )\n\t\"\"\"\n\n\tmPointA = getMPoint(pointA)\n\tmPointB = getMPoint(pointB)\n\tmVector = mPointA - mPointB\n\treturn mVector.length()\n\ndef renameTargetsFromClosestSources(sources, targets, suffixe=\"__\"):\n\t\"\"\"\n\tThis definition renames the targets from closest sources.\n\n\t:param sources: Sources. ( List )\n\t:param targets: Targets. ( List )\n\t:param suffixe: Suffixe. ( String )\n\t\"\"\"\n\n\tfor target in targets:\n\t\ttargetBarycenter = cmds.objectCenter(target, gl=True)\n\t\tnormes = {}\n\t\tfor source in sources:\n\t\t\tnormes[source] = norme(targetBarycenter, cmds.objectCenter(source, gl=True))\n\t\tclosest = min(normes, key=lambda item: normes[item])\n\t\tcmds.rename(target, \"%s%s\" % (closest.split(\"|\")[-1], suffixe))\n\n@stacksHandler\ndef pickSources_button_OnClicked(state=None):\n\t\"\"\"\n\tThis definition is triggered by the **pickSources_button** button when clicked.\n\n\t:param state: Button state. ( Boolean )\n\t\"\"\"\n\n\tcmds.textField(\"sources_textField\", edit=True, text=\", \".join(cmds.ls(sl=True, l=True)))\n\n@stacksHandler\ndef pickTargets_button_OnClicked(state=None):\n\t\"\"\"\n\tThis definition is triggered by the **pickTargets_button** button when clicked.\n\n\t:param state: Button state. ( Boolean )\n\t\"\"\"\n\n\tcmds.textField(\"targets_textField\", edit=True, text=\", \".join(cmds.ls(sl=True, l=True)))\n\n@stacksHandler\ndef renameFromClosest_button_OnClicked(state=None):\n\t\"\"\"\n\tThis definition is triggered by the **renameFromClosest_button** button when clicked.\n\n\t:param state: Button state. 
( Boolean )\n\t\"\"\"\n\n\tsources = [source for source in cmds.textField(\"sources_textField\", query=True, text=True).split(\", \") if cmds.objExists(source)]\n\ttargets = [target for target in cmds.textField(\"targets_textField\", query=True, text=True).split(\", \")\tif cmds.objExists(target)]\n\n\trenameTargetsFromClosestSources(sources, targets)\n\ndef renameFromClosest_window():\n\t\"\"\"\n\tThis definition creates the 'Rename From Closest' main window.\n\t\"\"\"\n\n\tcmds.windowPref(enableAll=False)\n\n\tif (cmds.window(\"renameFromClosest_window\", exists=True)):\n\t\tcmds.deleteUI(\"renameFromClosest_window\")\n\n\tcmds.window(\"renameFromClosest_window\",\n\t\ttitle=\"Rename From Closest\",\n\t\twidth=320)\n\n\tspacing = 5\n\n\tcmds.columnLayout(adjustableColumn=True, rowSpacing=spacing)\n\n\tcmds.rowLayout(numberOfColumns=3, columnWidth3=(125, 150, 130), adjustableColumn=2, columnAlign=(2, \"left\"), columnAttach=[(1, \"both\", spacing), (2, \"both\", spacing), (3, \"both\", spacing)])\n\tcmds.text(label=\"Sources:\")\n\tsources_textField = cmds.textField(\"sources_textField\")\n\tcmds.button(\"pickSources_button\", label=\"Pick Sources!\", command=pickSources_button_OnClicked)\n\tcmds.setParent(topLevel=True)\n\n\tcmds.rowLayout(numberOfColumns=3, columnWidth3=(125, 150, 130), adjustableColumn=2, columnAlign=(2, \"left\"), columnAttach=[(1, \"both\", spacing), (2, \"both\", spacing), (3, \"both\", spacing)])\n\tcmds.text(label=\"Targets:\")\n\ttargets_textField = cmds.textField(\"targets_textField\")\n\tcmds.button(\"pickTargets_button\", label=\"Pick Targets!\", command=pickTargets_button_OnClicked)\n\tcmds.setParent(topLevel=True)\n\n\tcmds.separator(style=\"single\")\n\n\tcmds.button(\"renameFromClosest_button\", label=\"Rename Targets!\", command=renameFromClosest_button_OnClicked)\n\n\tcmds.showWindow(\"renameFromClosest_window\")\n\n\tcmds.windowPref(enableAll=True)\n\ndef renameFromClosest():\n\t\"\"\"\n\tThis definition launches the 'Rename From Closest' main window.\n\t\"\"\"\n\n\trenameFromClosest_window()\n\n@stacksHandler\ndef IRenameFromClosest():\n\t\"\"\"\n\tThis definition is the renameFromClosest definition Interface.\n\t\"\"\"\n\n\trenameFromClosest()\n","sub_path":"src/maya/snippets/libraries/renameFromClosest.py","file_name":"renameFromClosest.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"632255867","text":"import os\nimport numpy as np\nfrom PySide6.QtCore import *\nfrom PySide6.QtGui import *\nfrom PySide6.QtWidgets import *\nfrom pathlib import Path\nfrom scipy.spatial.distance import cdist\nfrom typing import Union\n\nfrom .circle import PosCircleItem\nfrom ....utils.qtutils import dist_pts, qpointf_to_list\n\n\nclass PhotoViewer(QGraphicsView):\n\n zoom_factor = 1.25\n zoom_signal = Signal()\n\n def __init__(self, parent=None):\n super(PhotoViewer, self).__init__(parent)\n self._zoom = 0\n self._empty = True\n self._grabbed = False\n self._scene = QGraphicsScene(self)\n self._photo = QGraphicsPixmapItem()\n self._scene.addItem(self._photo)\n\n self.setScene(self._scene)\n self.setTransformationAnchor(QGraphicsView.ViewportAnchor.AnchorUnderMouse)\n self.setResizeAnchor(QGraphicsView.ViewportAnchor.AnchorUnderMouse)\n self.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)\n self.setFrameShape(QFrame.Shape.NoFrame)\n\n def hasPhoto(self):\n return not 
self._empty\n\n def fitInView(self):\n rect = QRectF(self._photo.pixmap().rect())\n if not rect.isNull():\n self.setSceneRect(rect)\n if self.hasPhoto():\n unity = self.transform().mapRect(QRectF(0, 0, 1, 1))\n self.scale(1 / unity.width(), 1 / unity.height())\n viewrect = self.geometry()\n viewrect.setRect(0, 0, viewrect.width(), viewrect.height())\n scenerect = self.transform().mapRect(rect)\n factor = max(viewrect.width() / scenerect.width(),\n viewrect.height() / scenerect.height())\n factor *= self.__class__.zoom_factor**self._zoom\n self.scale(factor, factor)\n\n def setPhoto(self, back_im: np.ndarray=None):\n height, width, chnum = back_im.shape\n bytesPerLine = width * chnum\n imgFormat = QImage.Format.Format_RGBA8888 if chnum == 4 else QImage.Format.Format_RGB888\n qImg = QImage(back_im.data, width, height, bytesPerLine, imgFormat)\n pixmap = QPixmap(qImg)\n\n self._zoom = 0\n if pixmap and not pixmap.isNull():\n self._empty = False\n self.setDragMode(QGraphicsView.DragMode.ScrollHandDrag)\n self._photo.setPixmap(pixmap)\n else:\n self._empty = True\n self.setDragMode(QGraphicsView.DragMode.NoDrag)\n self._photo.setPixmap(QPixmap())\n self.fitInView()\n\n def zoom_in(self):\n if self.hasPhoto():\n factor = self.__class__.zoom_factor\n self._zoom += 1\n self.scale(factor, factor)\n\n def zoom_out(self):\n if self.hasPhoto():\n factor = 1 / self.__class__.zoom_factor\n self._zoom -= 1\n if self._zoom > 0:\n self.scale(factor, factor)\n else:\n self._zoom = 0 \n self.fitInView()\n\n def get_zoom_factor(self):\n return self._zoom\n\n def wheelEvent(self, event):\n numDegrees = event.angleDelta() / 8\n numSteps = (numDegrees / 15).y()\n \n if self.hasPhoto():\n if numSteps > 0:\n self.zoom_in()\n self.zoom_signal.emit()\n elif numSteps < 0 and self._zoom != 0:\n self.zoom_out()\n self.zoom_signal.emit()\n \n\nclass PalmPositionCanvas(PhotoViewer):\n\n add_win_signal = Signal(QPointF, str)\n add_pos_signal = Signal()\n drop_img_signal = Signal(str)\n drop_pos_signal = Signal(str)\n cv_zoom_display_signal = Signal(QPoint, QPoint)\n cv_zoom_hide_signal = Signal()\n\n def __init__(self, parent, geometry: QRect, stylesheet: str):\n super(PalmPositionCanvas, self).__init__(parent)\n self.setStyleSheet(stylesheet)\n self.setGeometry(geometry)\n self.setDragMode(QGraphicsView.DragMode.ScrollHandDrag)\n self.setViewportUpdateMode(QGraphicsView.ViewportUpdateMode.FullViewportUpdate)\n \n self.pos_group = self._scene.createItemGroup(list())\n self.trn_group = self._scene.createItemGroup(list())\n self.crp_group = self._scene.createItemGroup(list())\n \n self.detecting = False # preventing from right-click when detecting process begin \n self.zoom_display = False\n\n self._mode = 'select'\n self._factor = 1.\n self._pixel_size = 1.\n self._add_point = False\n\n def scene_reset(self):\n \"\"\" Clear all items aon canvas and then reset the scene. 
\"\"\"\n if not self.hasPhoto(): return\n self._scene.clear()\n self._photo = QGraphicsPixmapItem()\n self._empty = True\n self.add_into_scene(self._photo)\n\n def mousePressEvent(self, event: QMouseEvent):\n # view_rect = self.geometry()\n # lt_pt = self.mapToScene(0, 0)\n # print(f'Left Top: {lt_pt.x()}')\n # print(f'Right Bottom: {self.mapToScene(view_rect.width(), view_rect.height())}')\n if not self.hasPhoto(): return\n\n scene_pos = self.mapToScene(event.pos())\n if self._mode in 'select':\n if event.buttons() == Qt.MouseButton.RightButton:\n self.zoom_display = True\n\n elif self._mode in 'auto':\n # Shift + Right: Remove the train window.\n # Shift + Left : Create new train window.\n # Press + Right: Create new detect window.\n if event.buttons() == Qt.MouseButton.RightButton and \\\n event.modifiers() == Qt.KeyboardModifier.ShiftModifier:\n self._delete_closeast_win(scene_pos)\n elif event.buttons() == Qt.MouseButton.LeftButton and \\\n event.modifiers() == Qt.KeyboardModifier.ShiftModifier:\n self.add_win_signal.emit(scene_pos, 'left')\n elif event.buttons() == Qt.MouseButton.RightButton and \\\n event.modifiers() == Qt.KeyboardModifier.NoModifier and not self.detecting:\n self.detecting = True \n self.add_win_signal.emit(scene_pos, 'right')\n\n elif self._mode in 'crop':\n # Shift + Right: Remove the crop window.\n # Press + Left : Create new crop window.\n if event.buttons() == Qt.MouseButton.RightButton and \\\n event.modifiers() == Qt.KeyboardModifier.ShiftModifier:\n self._delete_closeast_win(scene_pos)\n elif event.buttons() == Qt.MouseButton.LeftButton and \\\n event.modifiers() == Qt.KeyboardModifier.ShiftModifier:\n self.add_win_signal.emit(scene_pos, None)\n\n elif self._mode == 'erase':\n # Press + Left : Create new erase window.\n if event.buttons() == Qt.MouseButton.RightButton and \\\n event.modifiers() == Qt.KeyboardModifier.NoModifier:\n self.add_win_signal.emit(scene_pos, None)\n \n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event: QMouseEvent):\n if self._mode == 'select' and self._add_point: \n self._add_remove_pos_in_canvas(event.pos())\n\n def mouseMoveEvent(self, event: QMouseEvent) -> None:\n # tracking mouse position for key press event\n self.mouse_pos = event.pos()\n if self.zoom_display and event.buttons() == Qt.MouseButton.RightButton:\n self.cv_zoom_display_signal.emit(\n self.mouse_pos,\n self.mapToScene(event.pos()).toPoint()\n )\n return super().mouseMoveEvent(event)\n\n def mouseReleaseEvent(self, event: QMouseEvent) -> None:\n if self.zoom_display:\n self.zoom_display = False\n self.cv_zoom_hide_signal.emit()\n return super().mouseReleaseEvent(event)\n\n def keyPressEvent(self, keyEvent: QKeyEvent) -> None:\n if self._mode == 'select' and \\\n self._add_point and keyEvent.key() == Qt.Key.Key_Space:\n self._add_remove_pos_in_canvas(self.mouse_pos)\n\n def dragEnterEvent(self, event: QDragEnterEvent) -> None:\n if self.drop_accepted(event):\n event.acceptProposedAction()\n\n def dragMoveEvent(self, event: QGraphicsSceneDragDropEvent) -> None:\n if self.drop_accepted(event):\n event.acceptProposedAction()\n\n def dragLeaveEvent(self, event: QDragLeaveEvent) -> None:\n ...\n\n def drop_accepted(self, event: Union[QDragEnterEvent, QGraphicsSceneDragDropEvent]) -> bool:\n mData = event.mimeData()\n if mData.hasUrls() and len(mData.urls()) == 1:\n suffix = Path(mData.urls()[0].toLocalFile()).suffix\n if suffix == '.tif':\n return True\n elif suffix == '.csv' and self.hasPhoto():\n return True\n return False\n\n def 
dropEvent(self, event: QDropEvent) -> None:\n url = event.mimeData().urls()[0].toLocalFile()\n if Path(url).suffix == '.tif':\n self.drop_img_signal.emit(url)\n elif Path(url).suffix == '.csv':\n self.drop_pos_signal.emit(url)\n\n def add_into_scene(self, it: QGraphicsItem):\n self._scene.addItem(it)\n\n def remove_from_scene(self, it: QGraphicsItem):\n self._scene.removeItem(it)\n\n def set_factor(self, factor: float):\n self._factor = factor\n\n def set_pixel_size(self, pixel_size: float):\n self._pixel_size = pixel_size\n\n def set_mode(self, mode: str):\n \"\"\" Setiing whether users can change the points or not. \"\"\"\n self._mode = mode\n if mode == 'select':\n self.set_add_point(True)\n PosCircleItem.set_changeable(True)\n else:\n self.set_add_point(False)\n PosCircleItem.set_changeable(False)\n\n # ================================\n # Item Group related\n # ================================\n\n def reset_group(self, mode: str=None):\n assert mode in ('select', 'auto', 'crop', None), \\\n f\"Unrecognized mode '{mode}'\"\n if mode is None: mode = self._mode\n\n if mode == 'select':\n self.remove_from_scene(self.pos_group)\n self.pos_group = self._scene.createItemGroup(list())\n elif mode == 'auto':\n self.remove_from_scene(self.trn_group)\n self.trn_group = self._scene.createItemGroup(list())\n elif mode == 'crop':\n self.remove_from_scene(self.crp_group)\n self.crp_group = self._scene.createItemGroup(list())\n\n # ================================\n # Position Items related\n # ================================\n\n def palm_pos_data_loading(self, positions: np.ndarray, mode: str='insert'):\n assert mode in ['insert', 'override']\n if len(positions) == 0: return\n\n positions = (positions * self._factor).astype(int)\n\n if mode == 'override': \n self.reset_group('select')\n elif len(self.pos_group.childItems()) != 0: \n pts = self.get_palm_pos_data()\n min_dists = cdist(positions, pts).min(axis=1)\n positions = positions[min_dists > self.gis_to_px(float(os.environ['CLOSE_DIST_IN_PALM']))]\n\n for x, y in positions: \n self.pos_group.addToGroup(PosCircleItem(\n pos=QPoint(x, y),\n csize=self.gis_to_px(float(os.environ[\"POS_SIZE_IN_PALM\"]))))\n\n self.set_add_point(True)\n\n def set_add_point(self, mode: bool):\n self._add_point = mode\n\n def get_palm_pos_data(self) -> np.ndarray:\n return np.array([qpointf_to_list(it.rect().center()) for it in self.pos_group.childItems()])\n\n def get_palm_radius_data(self) -> np.ndarray:\n return np.array([it.radius for it in self.pos_group.childItems()])\n\n def _add_remove_pos_in_canvas(self, mouse_pos: QPoint):\n pos = self.mapToScene(mouse_pos)\n self.add_pos_signal.emit()\n\n dc, ic = np.Inf, None\n for it in self.pos_group.childItems():\n dist = dist_pts(it.rect().center(), pos)\n if dist < dc: dc, ic = dist, it\n \n if self.px_to_gis(dc) <= float(os.environ['CLOSE_DIST_IN_PALM']):\n self.remove_from_scene(ic)\n else:\n circle = PosCircleItem(\n pos=pos,\n csize=self.gis_to_px(float(os.environ[\"POS_SIZE_IN_PALM\"])))\n self.pos_group.addToGroup(circle)\n \n @property\n def no_pts(self) -> bool:\n return len(self.pos_group.childItems()) == 0\n\n # ================================\n # Crop Windows related\n # ================================\n\n def get_all_win_coords(self) -> np.ndarray:\n \"\"\" Return all windows' coordinates. 
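The insert mode of palm_pos_data_loading above drops any incoming point that lands too close to an existing one via scipy's cdist; the same filter in isolation, with an illustrative threshold standing in for CLOSE_DIST_IN_PALM:

import numpy as np
from scipy.spatial.distance import cdist

existing = np.array([[10, 10], [50, 50]])
incoming = np.array([[11, 10], [80, 20]])

# Distance from every incoming point to its nearest existing point.
min_dists = cdist(incoming, existing).min(axis=1)

close_dist = 5.0  # illustrative threshold, not a value from the record
kept = incoming[min_dists > close_dist]
print(kept)  # only [80, 20] survives; [11, 10] is a near-duplicate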
\"\"\"\n gp = self.trn_group if self._mode == 'auto' else self.crp_group\n windows = []\n for rects in gp.childItems():\n windows.append(list(map(int, rects.originRect().getCoords())))\n return np.array(windows)\n\n def _delete_closeast_win(self, pos: QPointF):\n \"\"\" Remove the most close window to pressed position. \"\"\"\n assert self._mode in ['auto', 'crop']\n\n cdist = np.inf # candidate distance\n citem = None # candidate index\n\n if self._mode == 'crop':\n gp = self.crp_group\n elif self._mode == 'auto':\n gp = self.trn_group\n\n for it in gp.childItems():\n rect = it.originRect()\n if not rect.contains(pos): continue\n cx = rect.x() + rect.width() // 2\n cy = rect.y() + rect.height() // 2\n dist = dist_pts(pos, QPointF(cx, cy))\n if dist < cdist:\n cdist, citem = dist, it\n\n if citem is not None: \n self.remove_from_scene(citem)\n\n # ================================\n # Other Utilities\n # ================================\n\n def gis_to_px(self, size: float) -> int:\n return int(size / self._pixel_size * self._factor)\n\n def px_to_gis(self, size: int) -> float:\n return size / self._factor * self._pixel_size\n","sub_path":"pkgs/app/palm/item/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":14088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"16868908","text":"import numpy as np\nfrom torch.utils import data\n\n\n##########################################################################\n# Dataset class that feeds data into a data generator. Takes in a list of folder paths in which a patient's scans are stored,\n# a list of the names of the folder / patient ids and whether segmentation labels are provided and should be loaded.\n# Returns two torch arrays, one that contains the multi-modal scans and the other that contains the masks.\n##########################################################################\n\n\nclass Dataset(data.Dataset):\n def __init__(self, folder_path, folder_id, seg_provided):\n self.folder_paths = folder_path\n self.folder_ids = folder_id\n self.seg_provided = seg_provided\n\n def __len__(self):\n return len(self.folder_ids)\n\n def __getitem__(self, index):\n data_folder = self.folder_paths[index]\n data_id = self.folder_ids[index]\n X = np.load(r\"{}/{}_scans.npy\".format(data_folder, data_id))\n if self.seg_provided:\n y = np.load(r\"{}/{}_mask.npy\".format(data_folder, data_id))\n return X, y\n else:\n return X\n\n","sub_path":"data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302618271","text":"from assemblygen import *\nimport globalvars as g\n\n#Breaks string representation of array to array name and index\n#if not array returns (Variable name,None)\ndef varname(var):\n if(isInt(var)):\n return var,None\n elif(var.find(\"[\")!=-1):\n n=var.find(\"[\")\n m=var.find(\"]\")\n index=var[n+1:m]\n # g.variables.append((\"v_\"+var[0:n],1)) //arrays are declared using special command declare\n return \"v_\"+var[0:n],index\n else:\n g.variables.append((\"v_\"+var,0))\n return \"v_\"+var,None\n\nclass instruction(object):\n # convert each line of code to an object instruction class\n # also finds basicblocks and labels \n def convert(self, param):\n # print(param)\n if(len(param)==1):\n return 0\n self.lineno=param[0]\n self.op=param[1]\n # print(param)\n if (param[1]==\"ifgoto\"):\n self.jmp=True\n self.cmpl=True\n 
self.cmpltype=param[2]\n self.src1,self.src1index=varname(param[3])\n self.src2,self.src2index=varname(param[4])\n self.jlno=param[5]\n g.basicblock.append(int(self.lineno))\n # g.basicblock.append(int(self.jlno)-1)\n # g.splitins[i].jlno\n # g.marker.append(int(self.jlno)-1)\n elif(param[1]==\"goto\"):\n self.jmp=True\n self.jlno=\"l_\"+param[2]\n g.basicblock.append(int(self.lineno))\n elif (param[1]==\"call\"):\n g.basicblock.append(int(self.lineno))\n if(int(self.lineno)<=len(g.splitins)):\n g.basicblock.append(int(self.lineno)+1)\n self.func=True\n self.funcname=\"u_\"+param[2]\n elif (param[1]==\"ret\"):\n g.debug(\"instruction.py :: ret\"+str(param))\n if(len(param)==3):\n self.dst,self.dstindex=varname(param[2])\n else:\n self.dst=None\n self.returnc=True\n elif (param[1]==\"func\"):\n # print(\"i m here\")\n # g.basicblock.append(int(self.lineno))\n g.marker.append(int(self.lineno)-1)\n for i in range(3,len(param)):\n self.paramlist.append(varname(param[i]))\n self.lbl=True\n self.lblname=\"u_\"+param[2]\n elif (param[1]==\"label\"):\n # print(\"i m here\")\n g.basicblock.append(int(self.lineno)-1)\n g.marker.append(int(self.lineno)-1)\n self.lbl=False\n self.lblname=\"l_\"+param[2]\n elif (param[1]==\"print\"):\n g.debug(\"printer\")\n self.printc=True\n self.paramlist =[]\n g.printstrings.append([\"str_\"+str(self.lineno),str(param[2][:-1]+\"\\\\0\\\"\")])\n self.paramlist.append(\"$str_\"+str(self.lineno))\n # g.debug(\"len of param :: \"+str(len(param)))\n for i in range(3,len(param)):\n temp,tempindex=varname(param[i])\n self.paramlist.append([temp,tempindex])\n g.debug(\"print line :: \"+str(self.paramlist))\n g.debug(\"string line :: \"+str(g.printstrings))\n # self.src1,self.src1index=varname(param[2])\n elif (param[1]==\"input\"):\n self.inputc=True\n self.src1,self.src1index=varname(param[2])\n elif (param[1]==\"=\" or param[1]=='not'):\n self.dst,self.dstindex=varname(param[2])\n self.src1,self.src1index=varname(param[3])\n # g.variables.append(varname(param[2]))\n # g.variables.append(varname(param[3]))\n elif (param[1]==\"push\" or param[1]==\"pop\"):\n self.dst,self.dstindex=varname(param[2])\n elif (param[1]==\"declare\"):\n g.variables.append((\"v_\"+param[2],param[3]))\n else:\n # print(param)\n self.dst,self.dstindex=varname(param[2])\n self.src1,self.src1index=varname(param[3])\n self.src2,self.src2index=varname(param[4])\n # g.variables.append(varname(param[2]))\n # g.variables.append(varname(param[3]))\n # g.variables.append(varname(param[4]))\n # prints an object of this class\n # Only for debugging purposes\n def printobj(self):\n g.debug(\"line no: \"+self.lineno)\n g.debug(\"op: \"+self.op)\n g.debug(\"dst: \"+str(self.dst))\n g.debug(\"src1: \"+str(self.src1))\n g.debug(\"src2: \"+str(self.src2))\n g.debug(\"jmp: \"+str(self.jmp))\n g.debug(\"cmpl: \"+str(self.cmpl))\n g.debug(\"cmpltype: \"+str(self.cmpltype))\n g.debug(\"jlno: \"+str(self.jlno))\n g.debug(\"lbl: \"+str(self.lbl))\n g.debug(\"lblname: \"+str(self.lblname))\n g.debug(\"func: \"+str(self.func))\n g.debug(\"funcname: \"+str(self.funcname))\n g.debug(\"print: \"+str(self.printc))\n g.debug(\"input: \"+str(self.inputc))\n g.debug(\"return: \"+str(self.returnc))\n g.debug(\"\\n\")\n # Used for initialisation\n def __init__(self):\n self.lineno=0\n self.op=None #operator\n self.dst=None #destination\n self.dstindex=None #Only for Array\n self.src1=None #source1\n self.src1index=None #Only For Array \n self.src2=None #source2\n self.src2index=None #Only For Array\n self.jmp=False #if jump or 
not\n self.cmpl=False #if compare or not\n self.cmpltype=None\n self.jlno=0 #line number to jump to\n self.lbl=False #label of a function\n self.lblname=None #Name of label\n self.func=False\n self.funcname=None\n self.printc=False #If we have to print or not(lib func)\n self.inputc=False #If we have to take input or not(lib func)\n self.returnc=False #If we have to return or not(lib func)\n self.paramlist=[]\n","sub_path":"asgn5/asgn2/src/instruction.py","file_name":"instruction.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"619983526","text":"#############################################################\r\n# Datadog Teamcenter Log Monitors Creation #\r\n#############################################################\r\n\r\n# This Script Contains monitor Creation for:\r\n# 1. FSC log Monitoring \r\n# 2. Web log Monitoring\r\n# 3. Pool log Monitoring \r\n############################################################# \r\n\r\nfrom datadog import api,initialize\r\n\r\noptions = {\r\n 'api_key': '',\r\n 'app_key': ''\r\n}\r\n\r\ninitialize(**options)\r\n\r\n# Create a new monitor\r\nmonitor_options = {\r\n \"notify_no_data\": False\r\n}\r\ntags = [\"datadog:True\", \"project:\", \"environment:\"]\r\napi.Monitor.create(\r\n type=\"log alert\",\r\n query=\"logs(\\\"source:java service: status:(error OR warn OR critical)\\\").index(\\\"*\\\").rollup(\\\"count\\\").last(\\\"15m\\\") >= 2\",\r\n name=\" - - - Warning/Error/Critical Message detected in the Logs!!!\",\r\n message=\" \\n@
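The varname helper above splits an array reference like "a[3]" into a prefixed name and an index string; a stripped-down mirror of its bracket parsing (isInt and the globals bookkeeping are omitted):

def varname_demo(var):
    # Integers pass through untouched; arrays yield (name, index);
    # plain scalars yield (name, None), matching varname() above.
    if var.isdigit():
        return var, None
    if "[" in var:
        n, m = var.find("["), var.find("]")
        return "v_" + var[:n], var[n + 1:m]
    return "v_" + var, None

print(varname_demo("arr[3]"))  # ('v_arr', '3')
print(varname_demo("count"))   # ('v_count', None)
print(varname_demo("42"))      # ('42', None)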
    \",\r\n tags=tags,\r\n options = {\r\n\t\t\"notify_audit\": False,\r\n\t\t\"locked\": False,\r\n\t\t\"timeout_h\": 0,\r\n\t\t\"silenced\": {\r\n\t\t\t\"*\": None\r\n\t\t},\r\n\t\t\"include_tags\": True,\r\n\t\t\"thresholds\": {\r\n\t\t\t\"critical\": 2,\r\n\t\t\t\"warning\": 1\r\n\t\t},\r\n\t\t\"new_host_delay\": 300,\r\n\t\t\"queryConfig\": {\r\n\t\t\t\"logset\": {\r\n\t\t\t\t\"name\": \"*\"\r\n\t\t\t},\r\n\t\t\t\"track\": \"logs\",\r\n\t\t\t\"timeRange\": {\r\n\t\t\t\t\"to\": 1594146980936,\r\n\t\t\t\t\"live\": True,\r\n\t\t\t\t\"from\": 1594101980936\r\n\t\t\t},\r\n\t\t\t\"queryString\": \"source:java service: status:(error OR warn OR critical)\",\r\n\t\t\t\"indexes\": [\r\n\t\t\t\t\"*\"\r\n\t\t\t],\r\n\t\t\t\"queryIsFailed\": False\r\n\t\t},\r\n\t\t\"notify_no_data\": False,\r\n\t\t\"renotify_interval\": 0,\r\n\t\t\"groupby_simple_monitor\": False,\r\n\t\t\"enable_logs_sample\": True,\r\n\t\t\"aggregation\": {\r\n\t\t\t\"metric\": \"count\",\r\n\t\t\t\"type\": \"count\",\r\n\t\t\t\"groupBy\": \"\"\r\n\t\t}\r\n\t}\r\n)\r\n","sub_path":"pythoncode/log_monitor.py","file_name":"log_monitor.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"65557334","text":"# KF, HPF filter class\r\nimport numpy as np\r\n\r\nclass Kalman:\r\n def __init__(self, A, H, R, Q, P, x, t_predict):\r\n '''\r\n A: 'const' state transition matrix\r\n H: 'const' observation matrix\r\n R: 'const' covariance of observation noise matrix \r\n Q: 'const' covariance of process noise, function of sigma_jerk\r\n P: 'initial' covariance marix\r\n x: 'initial' state matrix\r\n '''\r\n self.A=A\r\n self.A_trans = A.transpose()\r\n self.H=H\r\n self.H_trans = H.transpose()\r\n self.R=R\r\n self.Q=Q\r\n self.P=P\r\n self.x=x\r\n self.t_predict=t_predict \r\n \r\n def update(self, a, t_predict=None, Q=None):\r\n # 예측 시간간격과 sigma_jerk를 실시간으로 수정가능\r\n if t_predict: self.t_predict = t_predict\r\n if Q: self.Q = Q\r\n \r\n z = np.array([[a]])\r\n x_ = np.dot(self.A, self.x)\r\n P_ = np.dot(self.A, np.dot(self.P, self.A_trans)) + self.Q\r\n\r\n self.K = np.dot(P_, np.dot(self.H_trans, \\\r\n 1/(np.dot(self.H, np.dot(P_, self.H_trans)) + self.R)))\r\n self.x = x_ + np.dot(self.K, (z - np.dot(self.H, x_)))\r\n self.P = P_ - np.dot(self.K, np.dot(self.H, P_))\r\n \r\n x_predict = self.x\r\n for j in range(self.t_predict):\r\n x_predict = np.dot(self.A, x_predict)\r\n\r\n return (self.x[0][0], x_predict[0][0])\r\n # (filtered position, predicted position)\r\n\r\n\r\nclass HPF:\r\n def __init__(self, RC, sampling_duration):\r\n self.alpha = RC / (RC + sampling_duration)\r\n self.start=False\r\n \r\n def update(self, x):\r\n if self.start==False:\r\n self.start=True\r\n self.last_input = x\r\n self.last_output = x\r\n return x\r\n \r\n else:\r\n self.last_output = self.alpha * (self.last_output + x - self.last_input)\r\n self.last_input = x\r\n return self.last_output\r\n \r\n","sub_path":"filter_class.py","file_name":"filter_class.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608292574","text":"import numpy as np\nfrom PIL import Image\nfrom numpy import asarray\nfrom matplotlib import pyplot\n\nimage = Image.open(\"kobe.png\")\nw, h = image.size\nimage_New = image.resize((w, h * 2))\ndata = asarray(image)\nprint(\"data array size:\", data.shape)\nprint(data[1: 10, 1: 10 , 1])\nimage2 = Image.fromarray(data[:, :, 1])\ndata2 = data[0: 10, 
0: 10, 1] // 8 * 8\n\nimagex1 = Image.new(\"RGB\", (256, 256), color=(255,0,0))\n\ndatax1 = np.array(imagex1)\ndatax1.setflags(write=1)\nfor i in range(256):\n datax1[:, i, 0] = i\n\ndata3 = data\nimage3 = Image.fromarray(data2)\nimagex2 = Image.fromarray(datax1)\n#print(data2[0:10,0:10])\n#print(image2.mode)\n #print(image2.size)\n#image2.save(\"outP1.jpg\")\nimage3.save(\"outP2.jpg\")\n#pyplot.imshow(image2)\nimage_New.save(\"outP3.jpg\")\npyplot.imshow(imagex2)\npyplot.imshow(image_New)\npyplot.show()","sub_path":"Numpy/NumpyDemo3.py","file_name":"NumpyDemo3.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"487373512","text":"#!/usr/bin/env python3\nimport requests\nimport json\n\n\ndef request_json(url):\n return json.loads(requests.get(url).text)\n\n\ndef main():\n satellites = request_json(\"https://api.wheretheiss.at/v1/satellites\")\n\n for sat in satellites:\n if sat[\"name\"] == \"iss\":\n sattelite_id = sat[\"id\"]\n\n satellite = request_json(\n f\"https://api.wheretheiss.at/v1/satellites/{sattelite_id}\"\n )\n print(\n f\"the latitude of the {satellite['name']} is {satellite['latitude']}\\n\"\n f\"the longitude of the {satellite['name']} is {satellite['latitude']}\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"webscraping/ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188280126","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nЗадание на закрепление знаний по модулю json. Есть файл orders в формате JSON с информацией о заказах.\nНаписать скрипт, автоматизирующий его заполнение данными. Для этого:\n\"\"\"\nimport datetime\nimport json\nimport os\n\n\ndef write_order_to_json(item, quantity, price, buyer, date):\n data = {\n \"item\": item,\n \"quantity\": int(quantity),\n \"price\": int(price),\n \"buyer\": buyer,\n \"date\": date\n }\n\n with open(os.path.join('assets', 'orders.json'), 'r') as file:\n json_data = json.load(file)\n\n print(json_data)\n with open(os.path.join('assets', 'orders.json'), 'w') as file:\n json_data['orders'].append(data)\n\n print(json_data)\n json.dump(json_data, file, indent=4)\n\n print('Файл создан')\n\n\nwrite_order_to_json('Tuxedo', 12, 477, 'Sherlock', str(datetime.datetime.now()))\nwrite_order_to_json('Chair', 3, 200, 'Bill', str(datetime.datetime.now()))\nwrite_order_to_json('MacBook', 12, 1500, 'Harry', str(datetime.datetime.now()))\nwrite_order_to_json('Phone', 12, 500, 'Sergio', str(datetime.datetime.now()))\nwrite_order_to_json('Ball', 12, 100, 'Stephanie', str(datetime.datetime.now()))\n","sub_path":"hw2/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474098394","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/watchme/watchers/urls/helpers.py\n# Compiled at: 2020-04-10 14:08:50\n# Size of source mod 2**32: 5295 bytes\n\"\"\"\n\nCopyright (C) 2019-2020 Vanessa Sochat.\n\nThis Source Code Form is subject to the terms of the\nMozilla Public License, v. 2.0. 
If a copy of the MPL was not distributed\nwith this file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n\"\"\"\nimport requests, re\n\ndef get_params(kwargs, key='url_param_'):\n \"\"\"a general function to get parameter sets based on a user input. \n Returns a list of dictionaries, one per set.\n\n Parameters\n ==========\n kwargs: the dictionary of keyword arguments that may contain url\n parameters (format is url_param_\n key: the string that the parameters start with (defaults to url_param)\n \"\"\"\n params = {}\n names = [x for x in kwargs if x.startswith(key)]\n for _, name in enumerate(names):\n paramlist = kwargs.get(name).split(',')\n name = name.replace(key, '', 1)\n for i, _ in enumerate(paramlist):\n if i not in params:\n params[i] = {}\n if paramlist[i] != '':\n params[i][name] = paramlist[i]\n\n params = [x for x in params.values()]\n if len(params) == 0:\n params = [{}]\n return params\n\n\ndef parse_success_response(response, kwargs):\n \"\"\"parse a successful response of 200, meaning we honor the user\n request to return json, search for a regular expression, or return\n raw text. This is used by the basic GET/POST functions. For parsing\n with beautiful soup, see \"get_results\" and \"get_url_selection\"\n\n Parameters\n ==========\n response: the requests (200) response\n kwargs: dictionary of keyword arguments provided to function\n \"\"\"\n result = None\n save_as = kwargs.get('save_as', 'json')\n regex = kwargs.get('regex')\n if save_as == 'json':\n result = response.json()\n else:\n if regex not in ('', None):\n match = re.search(regex, response.text)\n result = match.group()\n else:\n result = response.text\n return result\n\n\ndef get_headers(kwargs):\n \"\"\"Get a single set of headers from the kwargs dict. A user agent is added\n as it is helpful in most cases.\n\n Parameters\n ==========\n kwargs: the dictionary of keyword arguments that may contain url\n parameters (format is url_param_\n \"\"\"\n headers = {'User-Agent': 'Mozilla/5.0'}\n for key, value in kwargs.items():\n if key.startswith('header_'):\n name = key.replace('header_', '', 1)\n if value is not None:\n headers[name] = value\n elif value is None and name in headers:\n del headers[name]\n\n return headers\n\n\ndef get_results(url, selector, func=None, attributes=None, params=None, get_text=False, headers=None, regex=None):\n \"\"\"given a url, a function, an optional selector, optional attributes, \n and a set (dict) of parameters, perform a request. This function is\n used if the calling function needs special parsing of the html with\n beautiful soup. 
If only a post/get is needed, this is not necessary.\n\n Parameters\n ==========\n url: the url to get (required)\n func: the function to use, defaults to requests.get\n selector: selection for the html response\n attributes: optional, a list of attributes\n params: a dictionary of parameters\n headers: a dictionary of header key value pairs\n \"\"\"\n from bs4 import BeautifulSoup\n if params is None:\n params = {}\n if headers is None:\n headers = {}\n if func is None:\n func = requests.get\n response = func(url, params=params, headers=headers)\n results = []\n if response.status_code == 200:\n soup = BeautifulSoup(response.text, 'lxml')\n for entry in soup.select(selector):\n if attributes is not None:\n [results.append(entry.get(x)) for x in attributes]\n elif regex not in (None, ''):\n match = re.search(regex, entry.text)\n results.append(match.group())\n elif get_text:\n results.append(entry.text)\n else:\n results.append(str(entry))\n\n results = [x for x in results if x]\n return results","sub_path":"pycfiles/watchme-0.0.28-py3.7/helpers.cpython-37.py","file_name":"helpers.cpython-37.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"347573854","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 18 09:26:54 2020\n\n@author: wangjingxian\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport array\nfrom sklearn import model_selection\n\nimport GCForest \n#from GCForest.gcForest import gcforest\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV\n\ndataset_train=pd.read_csv('E:\\data_mining\\eye_classification\\data\\data_add_train\\eeg_train_add_final3.csv')\n#print('训练数据特征:',dataset_train.shape)\ndataset_test=pd.read_csv('E:\\data_mining\\eye_classification\\data\\data_add_train\\eeg_test_diff_label3.csv')\n#\nX_train=dataset_train.iloc[:,0:14] \ny_train=dataset_train.iloc[:,[14]] \n\nX_train=np.array(X_train)\ny_train=np.array(y_train)\n\n#print(X_train,y_train)\nX_test=dataset_test.iloc[:,0:14]\n#index=dataset_test.iloc[:,[14]]\nX_test=np.array(X_test)\n\n'''\nparam_test2 = {'shape_1X':range(2,14,1), 'window':range(2,14,2)}\n#gsearch2 = GridSearchCV(GCForest.gcForest(n_mgsRFtree=30,param stride=1,cascade_test_size=0.2,n_cascadeRF=2,n_cascadeRFtree=101,min_samples_mgs=0.1,min_samples_cascade=0.1,tolerance=0.0,n_jobs=1),param_grid = param_test2, scoring='roc_auc',iid=False, cv=7)\ngsearch2 = GridSearchCV(estimator = GCForest.gcForest(), \n param_grid = param_test2, scoring='roc_auc',cv=5)\ngsearch2.fit(X_train,y_train)\nprint(gsearch2.cv_results_, gsearch2.best_params_, gsearch2.best_score_)\n#{'max_depth': 21, 'min_samples_split': 2} 0.9176631969050988\n'''\n\n\n#shape_1X:取值1-14\n#model = GCForest.gcForest(shape_1X=8,window=8)\n\nmodel = GCForest.gcForest(shape_1X=8,window=8,n_mgsRFtree=60,n_cascadeRF=4,n_cascadeRFtree=70)\n\n\nmodel.fit(X_train, y_train) #fit(X,y) 在输入数据X和相关目标y上训练gcForest;\n\n#print ('袋外样本来评估模型:',model.oob_score_)\ny_predprob = model.predict_proba(X_train)[:,1]\nprint(y_predprob)\nprint (\"AUC Score (Train): %f\" % metrics.roc_auc_score(y_train, y_predprob))\n\n\n\n'''\n#进行交叉验证\nscores = model_selection.cross_val_score(\n model,\n X_train,\n y_train,\n 
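The get_params helper above zips comma-separated url_param_* values into one dict per position; its expected behaviour, assuming the record's module is importable as helpers:

from helpers import get_params  # assumes the module above is importable

kwargs = {"url_param_page": "1,2", "url_param_sort": "asc,desc"}
print(get_params(kwargs))
# -> [{'page': '1', 'sort': 'asc'}, {'page': '2', 'sort': 'desc'}]

print(get_params({}))  # no url_param_* keys -> one empty parameter set: [{}]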
cv=7\n)\n'''\npredictions=model.predict(X_test)\npredictions_proba=model.predict_proba(X_test)\npredictions_proba1=predictions_proba.max(axis=1)\n\nprint(model.predict(X_test))\nprint(model.predict_proba(X_test))\n\nsubmission = pd.DataFrame({\n 'index':dataset_test['index'],\n 'probability':predictions_proba1,\n 'label':predictions\n })\n\nsubmission.to_csv('E:\\\\data_mining\\\\eye_classification\\\\result\\\\GCForest\\\\GCForest_predict4.csv',index=False)\n\n\n'''\ny_predict = model.predict_proba(X_test) #预测未知样本X的类概率;\n#y_predict = y_predict.tolist()\n\ny_predict1 = model.predict(X_test) #预测未知样本X的类别;\npredictions_proba1=predictions_proba.max(axis=1)\nprint('预测的分类结果:\\n',y_predict1)\nprint(\"---每个样本对应每个类别的概率---\")\nprint(y_predict)\n'''\n\n \n ","sub_path":"single_algorithm_model/GCForest/GCForest_model .py","file_name":"GCForest_model .py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"211238529","text":"from autolens.array import grids, mask as msk\nfrom autolens.model.profiles import mass_profiles as mp\nfrom autolens.lens import lens_data as ld\n\nfrom autolens.plotters import array_plotters\n\nfrom test.simulation import simulation_util\n\nimport numpy as np\n\n# Although we could test the deflection angles without using an image (e.al. by just making a grid), we have chosen to\n# set this test up using an image and mask. This gives run-time numbers that can be easily related to an actual lens\n# analysis\n\nsub_size = 2\ninner_radius_arcsec = 0.2\nouter_radius_arcsec = 4.0\n\nprint(\"sub grid size = \" + str(sub_size))\nprint(\"annular inner mask radius = \" + str(inner_radius_arcsec) + \"\\n\")\nprint(\"annular outer mask radius = \" + str(outer_radius_arcsec) + \"\\n\")\n\nfor data_resolution in [\"HST_Up\"]:\n\n print()\n\n imaging_data = simulation_util.load_test_imaging_data(\n data_type=\"lens_mass__source_smooth\",\n data_resolution=data_resolution,\n psf_shape=(3, 3),\n )\n mask = al.Mask.circular_annular(\n shape=imaging_data.shape,\n pixel_scale=imaging_data.pixel_scale,\n inner_radius_arcsec=inner_radius_arcsec,\n outer_radius_arcsec=outer_radius_arcsec,\n )\n lens_data = al.LensData(imaging_data=imaging_data, mask=mask, sub_size=sub_size)\n\n print(\"Deflection angle run times for image type \" + data_resolution + \"\\n\")\n print(\"Number of points = \" + str(lens_data.grid.shape[0]) + \"\\n\")\n\n interpolator = al.Interpolator.from_mask_grid_and_pixel_scale_interpolation_grids(\n mask=lens_data.mask, grid=lens_data.grid, pixel_scale_interpolation_grid=0.05\n )\n\n print(\n \"Number of interpolation points = \"\n + str(interpolator.interp_grid.shape[0])\n + \"\\n\"\n )\n\n ### EllipticalIsothermal ###\n\n mass_profile = al.EllipticalIsothermal(\n centre=(0.0, 0.0), axis_ratio=0.8, phi=45.0, einstein_radius=1.0\n )\n\n interp_deflections = mass_profile.deflections_from_grid(\n grid=interpolator.interp_grid\n )\n deflections = np.zeros((lens_data.grid.shape[0], 2))\n deflections[:, 0] = interpolator.interpolated_values_from_values(\n values=interp_deflections[:, 0]\n )\n deflections[:, 1] = interpolator.interpolated_values_from_values(\n values=interp_deflections[:, 1]\n )\n\n true_deflections = mass_profile.deflections_from_grid(grid=lens_data.grid)\n\n true_deflections_y_2d = lens_data.grid.scaled_array_2d_with_sub_dimensions_from_sub_array_1d(\n sub_array_1d=true_deflections[:, 0]\n )\n true_deflections_x_2d = 
lens_data.grid.scaled_array_2d_with_sub_dimensions_from_sub_array_1d(\n sub_array_1d=true_deflections[:, 1]\n )\n\n difference_y = deflections[:, 0] - true_deflections[:, 0]\n difference_x = deflections[:, 1] - true_deflections[:, 1]\n\n print(\"interpolation y error: \", np.mean(difference_y))\n print(\"interpolation y uncertainty: \", np.std(difference_y))\n print(\"interpolation y max error: \", np.max(difference_y))\n print(\"interpolation x error: \", np.mean(difference_x))\n print(\"interpolation x uncertainty: \", np.std(difference_x))\n print(\"interpolation x max error: \", np.max(difference_x))\n\n difference_y_2d = lens_data.grid.scaled_array_2d_with_sub_dimensions_from_sub_array_1d(\n sub_array_1d=difference_y\n )\n difference_x_2d = lens_data.grid.scaled_array_2d_with_sub_dimensions_from_sub_array_1d(\n sub_array_1d=difference_x\n )\n\n array_plotters.plot_array(array=true_deflections_y_2d)\n array_plotters.plot_array(array=difference_y_2d)\n\n array_plotters.plot_array(array=true_deflections_x_2d)\n array_plotters.plot_array(array=difference_x_2d)\n\n # difference_percent_y = (np.abs(difference_y) / np.abs(true_deflections[:,0]))*100.0\n # difference_percent_x = (np.abs(difference_x) / np.abs(true_deflections[:,1]))*100.0\n #\n # print(\"interpolation y mean percent difference: \", np.mean(difference_percent_y))\n # print(\"interpolation y std percent difference: \", np.std(difference_percent_y))\n # print(\"interpolation y max percent difference: \", np.max(difference_percent_y))\n # print(\"interpolation x mean percent difference: \", np.mean(difference_percent_x))\n # print(\"interpolation x std percent difference: \", np.std(difference_percent_x))\n # print(\"interpolation x mean percent difference: \", np.max(difference_percent_x))\n #\n # difference_percent_y_2d = lens_data.grid.scaled_array_2d_with_sub_dimensions_from_sub_array_1d(\n # sub_array_1d=difference_percent_y)\n # difference_percent_x_2d = lens_data.grid.scaled_array_2d_with_sub_dimensions_from_sub_array_1d(\n # sub_array_1d=difference_percent_x)\n #\n # array_plotters.plot_array(array=difference_percent_y_2d)\n # array_plotters.plot_array(array=difference_percent_x_2d)\n","sub_path":"test/precision/interpolation/precision.py","file_name":"precision.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"350766456","text":"import numpy as np\nimport os\nimport sys\nimport progressbar\nimport gc\nimport pickle\nfrom LSTM import LSTM\nfrom wrapper import Bidirectional\nfrom Regularization import regularization\nfrom attention_model import attention_model\nfrom data_preprocessing import song_preprocessing\nfrom functions import activations as act, helper_func as func\nfrom sklearn.preprocessing import normalize\n\n\nclass pre_model:\n def __init__(self, X, Y, Tx, Ty, lr = 0.005, n_a = 64, epoch = 100, optimizer = None):\n self.X = X\n self.Y = Y\n self.Tx = Tx\n self.Ty = Ty\n self.lr = lr\n self.n_a = n_a\n self.n_x = X.shape[1]\n self.n_y = Y.shape[1]\n self.epoch = epoch\n self.last_layer_hidden_state = None\n # Wy shape = (n_s,n_y)\n self.Wy = func.xavier((self.n_a, self.n_y))\n self.by = np.zeros((1, self.n_y))\n self.optimizer = optimizer\n self.s_weight = 0\n self.s_bias = 0\n self.v_weight = 0\n self.v_bias = 0\n self.TRAINING_THRESHOLD = 0\n self._params = {\"Wy\": self.Wy, \"by\": self.by}\n\n self.pre_LSTM = LSTM(\"pre_LSTM\", (self.Tx, self.n_x), (self.Tx, self.n_a), optimizer = optimizer, is_dropout = 
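The precision test above scores interpolation by differencing interpolated deflections against directly evaluated ones and reporting mean/std/max error; the same idea in one dimension with numpy only, using a toy 1/r profile rather than the real isothermal deflections:

import numpy as np

coarse = np.linspace(0.2, 4.0, 40)    # stand-in for the interpolation grid
fine = np.linspace(0.2, 4.0, 1000)    # stand-in for the masked image grid

def deflection(r):
    return 1.0 / r                    # toy profile, illustrative only

approx = np.interp(fine, coarse, deflection(coarse))
diff = approx - deflection(fine)
print(np.mean(diff), np.std(diff), np.max(np.abs(diff)))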
True)\n\n def forward_propagation_one_ex(self, e):\n \"\"\"\n description:\n forward propagation for one training example; data x label y\n ---parameter---\n i: index\n \"\"\"\n # self.gradient_checking()\n A = self.pre_LSTM.forward_propagation(self.X) # shape = (Tx, 2 * n_a)\n\n self.last_layer_hidden_state = A\n # TODO: dropout A\n #A = np.array(act.dropout(A, level=0.8)[0])\n\n # TODO: dropout lstm_S\n # lstm_S = act.dropout(lstm_S, level = 0.5)\n # initialize last layer Wy\n # st shape = (1,n_s)\n Y_hat = []\n print(\"Predicting Y\")\n for t in progressbar.progressbar(range(self.Ty)): # st shape = (1, n_s)\n Zy = np.matmul(np.atleast_2d(A[t,:]), self._params[\"Wy\"]) + self._params[\"by\"] # shape = (1, n_y)\n yt_hat = act.softmax(Zy)\n Y_hat.append(yt_hat.reshape(-1)) # yt_hat after reshape = (n_y,)\n\n # Y_hat shape = (Ty, n_y)\n Y_true = np.array(self.Y) # (Ty, n_y)\n Y_hat = np.array(Y_hat)\n total_lost = 0\n print(\"Lost....\")\n for t in range(self.Ty):\n lost = func.t_lost(Y_true[t,:], Y_hat[t,:])\n total_lost = total_lost + lost\n\n total_lost = (total_lost/self.Ty)\n print(\"loss: \", total_lost)\n return total_lost, Y_hat, Y_true\n\n def backward_propagation_one_ex(self, Y_hat, Y_true, e, lr):\n \"\"\"\n Description:\n backward propagation for one training example; data x label y\n ----parameter---\n Y_hat: predicted value given training data X\n Y_true: True label value of training data X\n \"\"\"\n # dL = (1/self.Ty)\n # shape (Ty, n_y)\n dZ = (Y_hat - Y_true)\n assert(dZ.shape == (self.Ty, self.n_y))\n # calculate dWy and dby\n dWy = np.matmul(np.transpose(self.last_layer_hidden_state.reshape(self.Ty, self.n_a)), dZ)\n dby = np.atleast_2d(np.sum(dZ, axis = 0))\n self.update_weight(dWy, dby, e, lr, optimizer = self.optimizer)\n\n assert(dWy.shape == (self.n_a, self.n_y) and dby.shape == (1, self.n_y))\n #shape = (Ty, n_a)\n dA = np.matmul(dZ, np.transpose(self._params[\"Wy\"]))\n self.pre_LSTM.backward_propagation(dA)\n\n def update_weight(self, dWy, dby, i ,lr=0.005, optimizer = None, beta1 = 0.9, beta2 = 0.999, eps = 1e-8):\n\n i = i + 1\n lr = lr * np.sqrt(1-beta2**i)/(1-beta1**i)\n s_corrected_weight = None\n s_corrected_bias = None\n v_corrected_weight = None\n v_corrected_bias = None\n if optimizer == \"Adam\":\n self.s_weight = beta2 * self.s_weight + (1 - beta2) * (dWy ** 2)\n s_corrected_weight = self.s_weight / (1 - beta2**i)\n self.s_bias = beta2 * self.s_bias + (1 - beta2) * (dby ** 2)\n s_corrected_bias = self.s_bias / (1 - beta2**i)\n\n self.v_weight = beta1 * self.v_weight + (1 - beta1) * dWy\n v_corrected_weight = self.v_weight / (1 - beta1**i)\n self.v_bias = beta1 * self.v_bias + (1 - beta1) * dby\n v_corrected_bias = self.v_bias / (1 - beta1**i)\n\n self.Wy = self.Wy - lr*(v_corrected_weight/(np.sqrt(s_corrected_weight) + eps))\n self.by = self.by - lr*(v_corrected_bias/(np.sqrt(s_corrected_bias) + eps))\n else:\n self.Wy = self.Wy - lr*dWy\n self.by = self.by - lr*dby\n\n self._params[\"Wy\"] = self.Wy\n self._params[\"by\"] = self.by\n\n self.save_weights()\n\n def save_weights(self):\n with open(\"weights_pre_song/predict_layer.pickle\", \"wb\") as f:\n pickle.dump(self._params, f, protocol = pickle.HIGHEST_PROTOCOL)\n\n def train(self):\n lr = self.lr\n print(\"Starting to train Detector..........\")\n for e in range(self.epoch):\n print(\"Epoch {}/{}\".format(e, self.epoch))\n\n\n total_lost, Y_hat, Y_true = self.forward_propagation_one_ex(e)\n print(\"Total Lost: \", total_lost)\n self.backward_propagation_one_ex(Y_hat, Y_true, e, 
lr)\n","sub_path":"python/pre_song_model.py","file_name":"pre_song_model.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"29772684","text":"from .styles import Margins, unitConverter\nfrom bs4 import Tag\nfrom .textFormater import bold, italic\n\ndef debug_printer(item):\n if isinstance(item, element):\n res = '<{0}>:'.format(item.type)\n for x in item.children:\n res += '\\n\\t' + debug_printer(x).replace('\\n', '\\n\\t')\n return res\n else:\n return 'text: ' + item.string.replace(' ', '_')\n\n\n\nclass text():\n \"\"\"docstring for text.\"\"\"\n def __init__(self, string, style):\n if style.get('font-weight') == 'bold':\n string = bold(string)\n if style.get('font-style') in ['oblique', 'italic']:\n string = italic(string)\n try:\n string = string.replace('\\t', ' ')\n except:\n pass\n self.string = string\n def render(self, force_inline = False):\n return self.string\n\ndef len_str_list(lis):\n res = 0\n for x in lis:\n res += len(lis)\n return res\n\nclass element():\n \"\"\"Represents any `display-type: block;`\n has nested in-line elements or blocks\"\"\"\n def __init__(self, tag, stylesheet, rootWidth, minimal_content_width = 5, tags_to_ignore = []):\n self.style = stylesheet.get(tag)\n self.margins = self.style.get('margins', Margins())\n self.type = self.style.get('display', 'in-line')\n self.children = list()\n self.width = rootWidth\n self.minimal_content_width = minimal_content_width\n for child in tag.children:\n if isinstance(child, Tag):\n if tag.name not in tags_to_ignore:\n self.children.append(element(child, stylesheet, self.content_width(), tags_to_ignore = tags_to_ignore))\n else:\n if len(child.strip()) > 0:\n self.children.append(text(child, self.style))\n def content_width(self):\n x = self.width - self.margins.left - self.margins.right\n if x > self.minimal_content_width:\n return x\n else:\n return self.minimal_content_width\n def make_paragraph(self, text_children):\n if len(text_children) > 0:\n # handle text_children\n text = str()\n for child in text_children:\n text += child.render()\n text = [x.strip() for x in text.split('\\n')]\n text_str = str()\n for x in text:\n text_str += x + ' '\n text_str = text_str[:len(text_str) -1]\n i = 0\n while True:\n try:\n if text_str[i] == ' ' and text_str[i+1] == ' ':\n text_str = text_str[:i] + text_str[i:]\n i += 1\n except IndexError:\n break\n # wrap the text into a paragraph\n lines = list()\n # set first line to indent\n current_line = ' ' * unitConverter(self.style.get('text-indent'))\n for word in text_str.split(' '):\n if len(current_line) + len(word) + 1 < self.content_width():\n current_line += ' ' + word\n else:\n lines.append(current_line)\n current_line = word\n lines.append(current_line)\n alingment = self.style.get('text-align')\n if alingment == 'center':\n lines = [x.center(self.content_width()) for x in lines]\n elif alingment == 'right':\n lines = [x.rjust(self.content_width()) for x in lines]\n elif alingment == 'justify':\n new_lines = list()\n for line in lines[:len(lines) - 1]:\n line = line.split(' ')\n i = 0\n while len_str_list(line) + len(line) - 1 < self.content_width():\n try:\n line[i] += ' '\n i += 1\n except IndexError:\n break\n new_line = str()\n for word in line[:len(line) - 1]:\n new_line += word + ' '\n new_line += line[len(line) - 1]\n new_lines.append(new_line)\n new_lines.append(lines[len(lines) - 1])\n lines = new_lines\n elif alingment == 'left':\n lines = 
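update_weight in the pre_song_model record above is a hand-rolled Adam step; for comparison, the textbook bias-corrected update in isolation (hyper-parameters are the usual defaults, not values from the record, which instead folds part of the correction into the learning rate):

import numpy as np

def adam_step(w, grad, m, v, t, lr=0.005, beta1=0.9, beta2=0.999, eps=1e-8):
    # First and second moment estimates, then bias correction by step t.
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    m_hat = m / (1 - beta1 ** t)
    v_hat = v / (1 - beta2 ** t)
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v

w, m, v = np.ones(3), np.zeros(3), np.zeros(3)
for t in range(1, 4):
    grad = 2 * w  # toy gradient of sum(w**2)
    w, m, v = adam_step(w, grad, m, v, t)
print(w)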
[x.ljust(self.content_width()) for x in lines]\n res = str()\n for line in lines:\n res += line + '\\n'\n return res\n else:\n return str()\n def render(self, force_inline = False):\n if self.type == 'in-line' or force_inline:\n text_str = str()\n for child in self.children:\n post_render = child.render(force_inline = True)\n if isinstance(post_render, text):\n text_str += post_render.render()\n else:\n text_str += post_render\n return text(text_str, self.style)\n elif self.type in ['block', 'list-item']:\n result = str()\n text_children = list()\n last_margin_bottom_len = 0\n for child in self.children:\n if isinstance(child, text):\n text_children.append(child)\n elif child.type == 'in-line':\n text_children.append(child.render())\n else:\n post_render = child.render()\n result += self.make_paragraph(text_children)\n text_children = list()\n if isinstance(post_render, text):\n post_render = child.make_paragraph([post_render])\n # add side margins to child\n post_render = ' '*child.margins.left + post_render.replace('\\n', ' '*child.margins.right + '\\n' + ' '*child.margins.left, post_render.count('\\n') - 1)\n\n result += '\\n'*max(last_margin_bottom_len, child.margins.top) + post_render\n last_margin_bottom_len = child.margins.bottom\n result += self.make_paragraph(text_children)\n # add side margins\n result = ' '*self.margins.left + result.replace('\\n', ' '*self.margins.right + '\\n' + ' '*self.margins.left, result.count('\\n') - 1)\n #print(self.style, '\\n', result.replace(' ', '~'), '\\n', self.margins, '\\n----\\n')\n return result\n else:\n return str()\n\ndef render(body, styleDict, rootWidth, ignored_tags = []):\n x = element(body, styleDict, rootWidth, tags_to_ignore = ignored_tags)\n return x.render()\n","sub_path":"tinyPub/htmlParser/renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":6631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"52439507","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport pdb\n\n# input comes from STDIN (standard input)\nfile1=sys.argv[1]\nnbr_reduces=sys.argv[2]\nsorted_part_nbr=[]\nk=[]\n\nf=open(file1,\"r\")\npart_nbr=[[] for _ in range(int(nbr_reduces))]\n\nfor line in f:\n # remove leading and trailing whitespace\n line = line.strip()\n # split the line into words\n words = line.split()\n # increase counters\n for word in words:\n l=hash(word)%int(nbr_reduces)\n part_nbr[l].append(word + \"DELIM,1\\n\" )\n \n\nfor i in range(0,int(nbr_reduces)):\n p_name=\"part-\"+str(i) \n sorted_part_nbr.append(open( file1 + \"sorted-\"+ p_name,'w'))\n\nfor i in range(0,int(nbr_reduces)):\n part_nbr[i].sort()\n for l in part_nbr[i]:\n sorted_part_nbr[i].write(l) \n\nfor i in range(0,int(nbr_reduces)): \n sorted_part_nbr[i].close()\n\n","sub_path":"applications/MapReduce/branches/MapReduce_Python/source/applications/wordcount/wordcount_map_partition.py","file_name":"wordcount_map_partition.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"306279340","text":"from sqlalchemy import create_engine, asc\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom database_setup import Base, Category, CategoryItem, User\r\n\r\nengine = create_engine('postgresql://catalog:catalog@localhost/catalog')\r\nBase.metadata.bind = engine\r\n\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n\r\nUser1 = User(name=\"Rawag\", email=\"abdelrahmanrawaj@gmail.com\",\r\n 
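The map/partition script above routes each word to a reducer with hash(word) % nbr_reduces; the core routing step in isolation (note that Python 3 randomizes str hashes across runs unless PYTHONHASHSEED is fixed, so partition assignments are only stable within one run):

nbr_reduces = 4
partitions = [[] for _ in range(nbr_reduces)]

for word in "the quick brown fox the lazy dog".split():
    # Same key -> same partition, so each reducer sees every count
    # for the words it owns.
    partitions[hash(word) % nbr_reduces].append(word + "DELIM,1")

for i, part in enumerate(partitions):
    print(i, sorted(part))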
picture='https://scontent.fcai3-1.fna.fbcdn.net/v/t1.0-9/23472730_1586239558089651_5567458259598829223_n.jpg?oh=9a62bd0b19b143462f639899059ded12&oe=5B067529')\r\nsession.add(User1)\r\nsession.commit()\r\n\r\ncategory = Category(name=\"FootBall\")\r\nsession.add(category)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Ball\",\r\n description=\"A ball is a round object (usually spherical but sometimes ovoid) with various uses.\"\r\n \" It is used in ball games, where the play of the game follows the state of the ball as it is hit,\"\r\n \" kicked or thrown by players. Balls can also be used for simpler activities,\"\r\n \" such as catch, marbles and juggling\", category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"T-shirt\",\r\n description=\"A T-shirt (or t shirt, or tee) is a style of unisex fabric shirt named after the T shape of its body and sleeves.\"\r\n \" It normally has short sleeves and a round neckline, known as a crew neck,\"\r\n \" which lacks a collar.\", category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\ncategory = Category(name=\"Rowing\")\r\nsession.add(category)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Boat\",\r\n description=\"A boat is a watercraft of a large range of sizes designed to float, plane, work or travel on water.\"\r\n \" Small boats are typically found on inland waterways (e.g. rivers and lakes) or in protected coastal areas.\"\r\n \" Another definition is a vessel that can be lifted out of the water.\",\r\n category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Watch\",\r\n description=\"Computer Definition. A rugged, water-resistant wristwatch that includes features such as an alarm, stopwatch,\"\r\n \" compass, heart rate monitor, tachymeter (rotating bezel for calculating speed),\"\r\n \" thermometer and tide indicator (for divers).\", category_id=category.id,\r\n user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\ncategory = Category(name=\"HandBall\")\r\nsession.add(category)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Ball\",\r\n description=\"A ball is a round object (usually spherical but sometimes ovoid) with various uses.\"\r\n \" It is used in ball games, where the play of the game follows the state of the ball as it is hit,\"\r\n \" kicked or thrown by players. Balls can also be used for simpler activities, such as catch,\"\r\n \" marbles and juggling.\", category_id=category.id, user_id=User1.id);\r\n\r\nsession.add(item)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Ball\",\r\n description=\"On some shoes, the heel of the sole has a rubber plate for durability and traction,\"\r\n \" while the front is leather for style. ... The heel is the bottom rear part of a shoe.\"\r\n \" Its function is to support the heel of the foot.\"\r\n \" They are often made of the same material as the sole of the shoe.\",\r\n category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\ncategory = Category(name=\"Boxing\")\r\nsession.add(category)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Gloves\",\r\n description=\"On some shoes, the heel of the sole has a rubber plate for durability and traction,\"\r\n \" while the front is leather for style. ... 
The heel is the bottom rear part of a shoe.\"\r\n \" Its function is to support the heel of the foot.\"\r\n \" They are often made of the same material as the sole of the shoe.\",\r\n category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Helmet\",\r\n description=\"A helmet is a form of protective gear worn to protect the head from injuries.\"\r\n \" More specifically, a helmet aids the skull in protecting the human brain. \"\r\n \"Ceremonial or symbolic helmets (e.g. UK policeman's helmet)\"\r\n \" without protective function are sometimes used.\", category_id=category.id,\r\n user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\ncategory = Category(name=\"Tennis\")\r\nsession.add(category)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Ball\",\r\n description=\"A ball is a round object (usually spherical but sometimes ovoid) with various uses.\"\r\n \" It is used in ball games, where the play of the game follows the state of the ball as it is hit,\"\r\n \" kicked or thrown by players. Balls can also be used for simpler activities, such as catch,\"\r\n \" marbles and juggling.\", category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Paddle\",\r\n description=\"Padel is a racquet sport. In the US and Canada the sport is known as Paddle.\"\r\n \" Padel is not to be confused with Platform Tennis, a winter and summer sport typically played at country clubs in the US and Canada,\"\r\n \" with courts heated from below to eliminate snow and water.\", category_id=category.id,\r\n user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\ncategory = Category(name=\"Swimming\")\r\nsession.add(category)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Glasses\",\r\n description=\"Goggles, or safety glasses, are forms of protective eyewear that usually enclose or protect the area surrounding the eye in order to prevent particulates,\"\r\n \" water or chemicals from striking the eyes. They are used in chemistry laboratories and in woodworking.\"\r\n \" They are often used in snow sports as well, and in swimming.\",\r\n category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n\r\nitem = CategoryItem(name=\"Suit\",\r\n description=\"A one-piece swimsuit most commonly refers to swimwear worn by women and girls when swimming in the sea or in a swimming pool,\"\r\n \" or for any activity in the sun, such as sun bathing. 
Today, the one-piece swimsuit is usually a skin-tight garment that covers a female's torso,\"\r\n \" except maybe the back or upper chest.\", category_id=category.id, user_id=User1.id);\r\nsession.add(item)\r\nsession.commit()\r\n","sub_path":"AddingSomeData.py","file_name":"AddingSomeData.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543887852","text":"from __future__ import print_function\n\nfrom dataset.synth.dwi import SynthProcessor\nfrom fwk.config import Config\n\nfrom util.iterators import chunk_iterator\nfrom util.lang import to_bool\n\n\nclass SynthProcessorScript:\n\n def execute(self):\n\n dry_run = Config.get_option('OUTPUTS', 'dry_run', cast_function=to_bool, default=False)\n\n processor = SynthProcessor(dry_run=dry_run)\n\n subjects, batch_index, number_of_batches = self.subject_iterator()\n\n print(f'\\n**** Processing batch {batch_index + 1} of {number_of_batches}\\n')\n\n for subject in subjects:\n print('processing subject {}'.format(subject))\n processor.process_subject(subject)\n\n def subject_iterator(self):\n\n number_of_subjects = int(Config.config['SUBJECTS']['number_of_subjects'])\n batch_index = int(Config.config['SUBJECTS']['subject_batch_index'])\n number_of_batches = int(Config.config['SUBJECTS']['number_of_batches'])\n max_sub = Config.get_option('SUBJECTS', 'max_subjects_per_batch', cast_function=int, default=None)\n\n subjects = chunk_iterator(batch_index, number_of_subjects, number_of_batches, max_sub)\n\n return subjects, batch_index, number_of_batches\n","sub_path":"dataset/synth/script/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"157142886","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/sanhehu/Documents/GitHub/crawlib-project/crawlib/example/scrapy_movie/items.py\n# Compiled at: 2019-12-25 23:33:52\n# Size of source mod 2**32: 1439 bytes\nimport mongoengine as me, scrapy, pymongo\nfrom mongoengine_mate import ExtendedDocument\nfrom .config import Config\nfrom .db import client, db\nc_movie_listpage = db['movie_listpage']\nc_movie = db['movie']\n\nclass MovieListPage(ExtendedDocument):\n _id = me.fields.IntField(primary_key=True)\n status = me.fields.IntField()\n edit_at = me.fields.DateTimeField()\n meta = dict(collection='site_movie_listpage',\n db_alias=(Config.MongoDB.database))\n\n\nclass ScrapyMovieListpageItem(scrapy.Item):\n _id = scrapy.Field()\n status = scrapy.Field()\n edit_at = scrapy.Field()\n\n def build_url(self):\n return '{}/movie/listpage/{}'.format(Config.Url.domain, self._id)\n\n def process(self):\n c_movie_listpage.update_one(filter={'_id': self['_id']},\n update={'$set': dict(self)},\n upsert=True)\n\n\nclass ScrapyMovieItem(scrapy.Item):\n _id = scrapy.Field()\n title = scrapy.Field()\n status = scrapy.Field()\n edit_at = scrapy.Field()\n\n def process(self):\n c_movie.update_one(filter={'_id': self['_id']},\n update={'$set': dict(self)},\n upsert=True)","sub_path":"pycfiles/crawlib-0.1.1-py2.py3-none-any/items.cpython-36.py","file_name":"items.cpython-36.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"340659336","text":"import configparser\n\n\nAPI_URL = 
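Both Scrapy items in the crawlib record above persist with the same idiom, update_one with a $set document and upsert=True keyed on _id; the pattern in isolation against a hypothetical local server:

import pymongo

client = pymongo.MongoClient("localhost", 27017)  # hypothetical connection
c_movie = client["site_movie"]["movie"]

doc = {"_id": 1, "title": "example", "status": 50}
# Inserts the document when _id 1 is absent, otherwise overwrites just the
# listed fields in place -- so re-running a crawl stays idempotent.
c_movie.update_one(filter={"_id": doc["_id"]},
                   update={"$set": doc},
                   upsert=True)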
'https://www.goodreads.com'\n\nrequest_token_url = '{}/oauth/request_token'.format(API_URL)\nauthorize_url = '{}/oauth/authorize'.format(API_URL)\naccess_token_url = '{}/oauth/access_token'.format(API_URL)\nauth_user_url = '{}/api/auth_user'.format(API_URL)\nfriend_list_url = '{}/friend/user'.format(API_URL)\nshelves_list_url = '{}/shelf/list.xml'.format(API_URL)\nfollowers_list_url = '{}/user/USER_ID/followers.xml'.format(API_URL)\nfollowing_list_url = '{}/user/USER_ID/following.xml'.format(API_URL)\nbooks_owned_url = '{}/owned_books/user?format=xml&id=USER_ID'.format(API_URL)\nshow_owned_book_url = '{}/owned_books/show/OWNED_BOOK_ID?format=xml'\\\n .format(API_URL)\n\nCONFIG_FILE = 'config.ini'\nconfig = configparser.ConfigParser()\nconfig.read(CONFIG_FILE)\n\napi_key = config['DEFAULT']['GOODREADS_API_KEY']\napi_secret = config['DEFAULT']['GOODREADS_API_SECRET']\nuser_email = config['DEFAULT']['USER_EMAIL']\n","sub_path":"goodreads_tools/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"24338988","text":"# dataloader add 3.0 scale\n# dataloader add filer text\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils import data\nimport util\nimport cv2\nimport random\nimport torchvision.transforms as transforms\nimport torch\nimport config\n\n# ic15_root_dir = '/home/gem/phucph/PixelLink.pytorch/dataset/\nic15_root_dir = 'dataset/'\nic15_test_data_dir = ic15_root_dir + 'test_images/'\n# ic15_test_gt_dir = ic15_root_dir + 'test_gt/'\n\nrandom.seed(123456)\n\ndef get_img(img_path):\n try:\n img = cv2.imread(img_path)\n img = img[:, :, [2, 1, 0]]\n \n except Exception as e:\n print(img_path)\n raise\n return img \n\ndef scale(img, long_size=2480):\n h, w = img.shape[0:2]\n if max(h,w) > long_size:\n scale = long_size * 1.0 / max(h, w)\n h,w=h*scale,w*scale\n \n sw=int(w/32)*32\n sh=int(h/32)*32\n img = cv2.resize(img, dsize=(sw,sh))\n return img\n\nclass IC15TestLoader(data.Dataset):\n def __init__(self, part_id=0, part_num=1, long_size=2240):\n data_dirs = [ic15_test_data_dir]\n \n self.img_paths = []\n \n for data_dir in data_dirs:\n img_names = util.io.ls(data_dir, '.jpg')\n img_names.extend(util.io.ls(data_dir, '.png'))\n\n img_paths = []\n for idx, img_name in enumerate(img_names):\n img_path = data_dir + img_name\n img_paths.append(img_path)\n \n self.img_paths.extend(img_paths)\n\n part_size = int(len(self.img_paths) / part_num)\n l = part_id * part_size\n r = (part_id + 1) * part_size\n self.img_paths = self.img_paths[l:r]\n self.long_size = config.test_long_size # config longsize test\n\n def __len__(self):\n return len(self.img_paths)\n\n def __getitem__(self, index):\n img_path = self.img_paths[index]\n\n img= get_img(img_path)\n # print(\"size:\",height,weight)\n scaled_img = scale(img, self.long_size)\n cv2.imwrite('outputs/' + img_path.split('/')[-1], scaled_img)\n scaled_img = Image.fromarray(scaled_img)\n scaled_img = scaled_img.convert('RGB')\n scaled_img = transforms.ToTensor()(scaled_img)\n scaled_img = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(scaled_img)\n \n return img[:, :, [2, 1, 0]], scaled_img","sub_path":"dataset/icdar2015_test_loader.py","file_name":"icdar2015_test_loader.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"616088199","text":"#!/usr/bin/env python\n# -*- coding: utf-8 
-*-\n\n##########################################################################\n# NSAp - Copyright (C) CEA, 2015\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html for details.\n##########################################################################\n\n\nfrom .utils import create_parameter_file, run_connectomist\n\n\ndef dwi_outlier_detection(output_directory,\n raw_dwi_directory,\n rough_mask_directory):\n \"\"\"\n Wrapper to Connectomist's \"Outliers\" tab.\n\n Parameters\n ----------\n output_directory: Str, path to Connectomist output work directory.\n raw_dwi_directory: Str, path to Connectomist Raw DWI folder.\n rough_mask_directory: Str, path to Connectomist Rough Mask folder.\n\n Returns\n -------\n outliers_directory: Str, path to the Connectomist output work directory,\n returned under a distinct name (see the Capsul note below).\n \"\"\"\n\n algorithm_name = \"DWI-Outlier-Detection\"\n\n parameters_value = {'rawDwiDirectory': raw_dwi_directory,\n 'roughMaskDirectory': rough_mask_directory,\n 'outputWorkDirectory': output_directory,\n '_subjectName': '',\n 'discardedOrientationList': '',\n 'outlierFactor': 3.0}\n\n parameter_file = create_parameter_file(algorithm_name,\n parameters_value,\n output_directory)\n run_connectomist(algorithm_name, parameter_file)\n\n # Capsul needs the output name to be different from input arguments\n outliers_directory = output_directory\n return outliers_directory\n","sub_path":"clindmri/preproc/connectomist/outliers.py","file_name":"outliers.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377628432","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2/7/18 8:04 PM \n\n@author: Hantian Liu\n\"\"\"\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom UKF import UKF, init\nfrom fromRaw import getAcc, getAngularVelocity\nfrom utils import quart2mat\nfrom panorama import warp\nfrom noFilter import rotationMatrixToEulerAngles, onlyAcc, onlyGyro\nimport numpy as np\nfrom scipy import io\nimport os, transforms3d, cv2, math\nfrom sync import sync_to_cam, sync_gt\n\n\n############################\n## MODIFY THESE VARIABLES ##\n############################\ncamfolder = \"./data/cam\"\nimufolder = \"./data/imu\"\nviconfolder = \"./data/vicon\"\nmax_dataset_num=13\n\nvicon_exists=False\n\nshow_rpy_plots=False\nshow_imu_only_rpy_plots=True\n\nshow_panorama=True\nshow_panorama_from_vicon=True\n\nfirst_still=0\nlast_still=0\n############################\n\n\n#pixel scale up for panorama\nscale=500\n#final panorama canvas size\ncanvassize=np.array([1800,3600,3])\n# tolerance for the magnitude of valid acc measurements\nepsilon=0.008\n\ndef showAllResults():\n\t# total = len(os.listdir(imufolder))\n\tfor datanum in range(11, max_dataset_num + 1):\n\t\tprint('Data number '+str(datanum))\n\t\timuname=os.path.join(imufolder, "imuRaw" + str(datanum) + '.mat')\n\t\tif not os.path.isfile(imuname):\n\t\t\tcontinue\n\t\timu = io.loadmat(imuname)\n\t\timu_ts = imu['ts']\n\t\timu_ts = imu_ts[0, :]\n\t\timuraw = imu['vals']\n\n\t\tif vicon_exists:\n\t\t\tvicon = io.loadmat(os.path.join(viconfolder, "viconRot" + str(datanum) + '.mat'))\n\t\t\tgt_ts = vicon['ts']\n\t\t\tgt_ts = gt_ts[0, :]\n\t\t\tgt = vicon['rots']\n\t\t\tgt_synced, imu_synced, imu_ts = sync_gt(gt_ts, imu_ts, gt, imuraw)\n\t\t\tr_gt = []\n\t\t\tp_gt = []\n\t\t\ty_gt = []\n\t\telse:\n\t\t\timu_synced=imuraw\n\n\t\tprint('converting raw data to 
values')\n\t\tox, oy, oz = getAngularVelocity(imu_synced)\n\t\tax, ay, az = getAcc(imu_synced)\n\t\tR=[]\n\t\tr_my = []\n\t\tp_my = []\n\t\ty_my = []\n\n\t\tr_a = []\n\t\tp_a = []\n\t\ty_a = []\n\t\tr_g = []\n\t\tp_g = []\n\t\ty_g = []\n\t\tq0 = np.zeros([4, 1])\n\t\tmu, sigma = init()\n\t\tprint('running UKF')\n\t\tfor i in range(len(imu_ts)):\n\t\t\tif abs(ax[i] ** 2 + ay[i] ** 2 + az[i] ** 2 - 1) > epsilon:\n\t\t\t\tacc_valid = False\n\t\t\telse:\n\t\t\t\tacc_valid = True\n\n\t\t\tmu, sigma, q_ukf = UKF(ox, oy, oz, imu_ts, i, mu, sigma, acc_valid, ax, ay, az)\n\t\t\tmy_mat = quart2mat(q_ukf)\n\t\t\tR.append(my_mat)\n\t\t\trr, pp, yy = transforms3d.euler.mat2euler(my_mat, axes = 'szyx')\n\t\t\tr_my.append(rr)\n\t\t\tp_my.append(pp)\n\t\t\ty_my.append(yy)\n\t\t\tif vicon_exists:\n\t\t\t\tr, p, y = transforms3d.euler.mat2euler(gt_synced[:, :, i], axes = 'szyx')\n\t\t\t\tr_gt.append(r)\n\t\t\t\tp_gt.append(p)\n\t\t\t\ty_gt.append(y)\n\n\t\t\tif show_imu_only_rpy_plots:\n\t\t\t\trrr, ppp, yyy, q0 = onlyGyro(ox, oy, oz, imu_ts, i, q0)\n\t\t\t\tr_g.append(rrr)\n\t\t\t\tp_g.append(ppp)\n\t\t\t\ty_g.append(yyy)\n\t\t\t\trrrr, pppp, yyyy = onlyAcc(ax, ay, az, i)\n\t\t\t\tr_a.append(rrrr)\n\t\t\t\tp_a.append(pppp)\n\t\t\t\ty_a.append(yyyy)\n\n\t\tif show_rpy_plots:\n\t\t\tif vicon_exists:\n\t\t\t\tr_gt_mat = np.asarray(r_gt)\n\t\t\t\tp_gt_mat = np.asarray(p_gt)\n\t\t\t\ty_gt_mat = np.asarray(y_gt)\n\t\t\t\tr_my_mat = np.asarray(r_my)\n\t\t\t\tp_my_mat = np.asarray(p_my)\n\t\t\t\ty_my_mat = np.asarray(y_my)\n\t\t\t\tfig = plt.figure()\n\t\t\t\tax1 = fig.add_subplot(311)\n\t\t\t\tax1.set_ylabel('yaw angle')\n\t\t\t\tax1.plot(imu_ts, r_gt_mat, 'r', label = 'Vicon')\n\t\t\t\tax1.plot(imu_ts, r_my_mat, 'g', label = 'UKF')\n\t\t\t\tax2 = fig.add_subplot(312)\n\t\t\t\tax2.set_ylabel('pitch angle')\n\t\t\t\tax2.plot(imu_ts, p_gt_mat, 'r', label = 'Vicon')\n\t\t\t\tax2.plot(imu_ts, p_my_mat, 'g', label = 'UKF')\n\t\t\t\tax3 = fig.add_subplot(313)\n\t\t\t\tax3.set_ylabel('roll angle')\n\t\t\t\tax3.set_xlabel('time stamp')\n\t\t\t\tax3.plot(imu_ts, y_gt_mat, 'r', label = 'Vicon')\n\t\t\t\tax3.plot(imu_ts, y_my_mat, 'g', label = 'UKF')\n\n\t\t\t\tif show_imu_only_rpy_plots:\n\t\t\t\t\tr_g_mat = np.asarray(r_g)\n\t\t\t\t\tp_g_mat = np.asarray(p_g)\n\t\t\t\t\ty_g_mat = np.asarray(y_g)\n\t\t\t\t\tr_a_mat = np.asarray(r_a)\n\t\t\t\t\tp_a_mat = np.asarray(p_a)\n\t\t\t\t\ty_a_mat = np.asarray(y_a)\n\t\t\t\t\tax1.plot(imu_ts, r_g_mat, 'b', label = 'Gyro')\n\t\t\t\t\tax1.plot(imu_ts, r_a_mat, 'c', label = 'Acc')\n\t\t\t\t\tax2.plot(imu_ts, p_g_mat, 'b', label = 'Gyro')\n\t\t\t\t\tax2.plot(imu_ts, p_a_mat, 'c', label = 'Acc')\n\t\t\t\t\tax3.plot(imu_ts, y_g_mat, 'b', label = 'Gyro')\n\t\t\t\t\tax3.plot(imu_ts, y_a_mat, 'c', label = 'Acc')\n\t\t\t\t\tax1.legend(loc = 'upper right')\n\t\t\t\t\tax2.legend(loc = 'upper right')\n\t\t\t\t\tax3.legend(loc = 'upper right')\n\t\t\t\t\tfig.suptitle('Euler angles for dataset no.' + str(datanum))\n\t\t\t\t\tplt.show()\n\t\t\t\telse:\n\t\t\t\t\tax1.legend(loc = 'upper right')\n\t\t\t\t\tax2.legend(loc = 'upper right')\n\t\t\t\t\tax3.legend(loc = 'upper right')\n\t\t\t\t\tfig.suptitle('Euler angles for dataset no.' 
+ str(datanum))\n\t\t\t\t\tplt.show()\n\t\t\telse:\n\t\t\t\tr_my_mat = np.asarray(r_my)\n\t\t\t\tp_my_mat = np.asarray(p_my)\n\t\t\t\ty_my_mat = np.asarray(y_my)\n\t\t\t\tfig = plt.figure()\n\t\t\t\tax1 = fig.add_subplot(311)\n\t\t\t\tax1.set_ylabel('yaw angle')\n\t\t\t\tax1.plot(imu_ts, r_my_mat, 'g', label = 'UKF')\n\t\t\t\tax2 = fig.add_subplot(312)\n\t\t\t\tax2.set_ylabel('pitch angle')\n\t\t\t\tax2.plot(imu_ts, p_my_mat, 'g', label = 'UKF')\n\t\t\t\tax3 = fig.add_subplot(313)\n\t\t\t\tax3.set_ylabel('roll angle')\n\t\t\t\tax3.set_xlabel('time stamp')\n\t\t\t\tax3.plot(imu_ts, y_my_mat, 'g', label = 'UKF')\n\n\t\t\t\tif show_imu_only_rpy_plots:\n\t\t\t\t\tr_g_mat = np.asarray(r_g)\n\t\t\t\t\tp_g_mat = np.asarray(p_g)\n\t\t\t\t\ty_g_mat = np.asarray(y_g)\n\t\t\t\t\tr_a_mat = np.asarray(r_a)\n\t\t\t\t\tp_a_mat = np.asarray(p_a)\n\t\t\t\t\ty_a_mat = np.asarray(y_a)\n\t\t\t\t\tax1.plot(imu_ts, r_g_mat, 'b', label = 'Gyro')\n\t\t\t\t\tax1.plot(imu_ts, r_a_mat, 'c', label = 'Acc')\n\t\t\t\t\tax2.plot(imu_ts, p_g_mat, 'b', label = 'Gyro')\n\t\t\t\t\tax2.plot(imu_ts, p_a_mat, 'c', label = 'Acc')\n\t\t\t\t\tax3.plot(imu_ts, y_g_mat, 'b', label = 'Gyro')\n\t\t\t\t\tax3.plot(imu_ts, y_a_mat, 'c', label = 'Acc')\n\t\t\t\t\tax1.legend(loc = 'upper right')\n\t\t\t\t\tax2.legend(loc = 'upper right')\n\t\t\t\t\tax3.legend(loc = 'upper right')\n\t\t\t\t\tfig.suptitle('Euler angles for dataset no.' + str(datanum))\n\t\t\t\t\tplt.show()\n\t\t\t\telse:\n\t\t\t\t\tax1.legend(loc = 'upper right')\n\t\t\t\t\tax2.legend(loc = 'upper right')\n\t\t\t\t\tax3.legend(loc = 'upper right')\n\t\t\t\t\tfig.suptitle('Euler angles for dataset no.' + str(datanum))\n\t\t\t\t\tplt.show()\n\n\t\tif show_panorama:\n\t\t\tpanorama = np.zeros(canvassize)\n\t\t\tpano = np.zeros(canvassize)\n\n\t\t\tcamname = os.path.join(camfolder, \"cam\" + str(datanum) + '.mat')\n\t\t\tif not os.path.isfile(camname):\n\t\t\t\tcontinue\n\t\t\tcamdata = io.loadmat(camname)\n\t\t\tcam_ts = camdata['ts']\n\t\t\tcam_ts = cam_ts[0, :]\n\t\t\tcam = camdata['cam']\n\n\t\t\tif vicon_exists and show_panorama_from_vicon:\n\t\t\t\tgt_new = sync_to_cam(gt_ts, cam_ts, gt)\n\t\t\t\tframe_num = len(cam_ts)\n\t\t\t\tprint('Adding frames to panorama from vicon data')\n\t\t\t\t#fig2 = plt.figure()\n\t\t\t\tvideoname = 'Video' + 'Vicon' + str(datanum) + '.mp4'\n\t\t\t\tvideo = cv2.VideoWriter(videoname, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \\\n\t\t\t\t\t\t\t\t\t\t15, (canvassize[1], canvassize[0]))\n\t\t\t\tcv2.namedWindow(videoname, cv2.WINDOW_NORMAL)\n\n\t\t\t\tfor frame in range(first_still, frame_num-last_still):\n\t\t\t\t\tpanorama = warp(cam[:, :, :, frame], gt_new[:, :, frame], panorama)\n\t\t\t\t\tpanorama = panorama.astype('uint8')\n\n\t\t\t\t\tcv2.imshow(videoname, panorama)\n\t\t\t\t\tkey = cv2.waitKey(100)\n\t\t\t\t\tvideo.write(panorama)\n\t\t\t\tvideo.release()\n\t\t\t\tcv2.destroyAllWindows()\n\t\t\t\t'''\n\t\t\t\tax=fig2.add_subplot(111)\n\t\t\t\tax.imshow(panorama)\n\t\t\t\tfig2.suptitle('Panorama from vicon for dataset no.' 
+ str(datanum))\n\t\t\t\tplt.show()\n\t\t\t\t'''\n\n\t\t\tR_len=np.shape(R)[0]\n\t\t\tR_reshape=np.zeros([3,3,R_len])\n\t\t\tfor eachR in range(R_len):\n\t\t\t\tR_reshape[:,:,eachR]=R[eachR]\n\t\t\tR_new = sync_to_cam(imu_ts, cam_ts, R_reshape)\n\t\t\tframe_num = len(cam_ts)\n\t\t\tprint('Adding frames to panorama from IMU data')\n\t\t\tvideoname = 'Video' + 'IMU' + str(datanum) + '.mp4'\n\t\t\tvideo = cv2.VideoWriter(videoname, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), \\\n\t\t\t\t\t\t\t\t\t15, (canvassize[1], canvassize[0]))\n\t\t\tcv2.namedWindow(videoname, cv2.WINDOW_NORMAL)\n\n\t\t\tfor frame in range(first_still, frame_num - last_still):\n\t\t\t\tpano = warp(cam[:, :, :, frame], R_new[:, :, frame], pano)\n\t\t\t\tpano = pano.astype('uint8')\n\n\t\t\t\tcv2.imshow(videoname, pano)\n\t\t\t\tkey = cv2.waitKey(100)\n\t\t\t\tvideo.write(pano)\n\t\t\tvideo.release()\n\t\t\tcv2.destroyAllWindows()\n\t\t\t'''\n\t\t\tfig3 = plt.figure()\n\t\t\tax = fig3.add_subplot(111)\n\t\t\tax.imshow(pano)\n\t\t\tfig3.suptitle('Panorama from IMU for dataset no.' + str(datanum))\n\t\t\tplt.show()\t\t\t\n\t\t\t'''\n\n\nif __name__ == '__main__':\n\tshowAllResults()","sub_path":"displayResults.py","file_name":"displayResults.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"325885869","text":"import pytest\n\nfrom topic_05_data_structure.hw.set_1_get_info_for_3_set import get_info_for_3_set\n\nparams = [\n (None, None, None, 'Must be set!'),\n ({1}, None, None, 'Must be set!'),\n (None, {1}, None, 'Must be set!'),\n (None, None, {1}, 'Must be set!'),\n\n ({1}, {1}, {1}, {'left == mid == right': True,\n 'left == mid': True,\n 'left == right': True,\n 'mid == right': True,\n 'left & mid': {1},\n 'left & right': {1},\n 'mid & right': {1},\n 'left <= mid': True,\n 'mid <= left': True,\n 'left <= right': True,\n 'right <= left': True,\n 'mid <= right': True,\n 'right <= mid': True}),\n\n ({1}, {2}, {3}, {'left == mid == right': False,\n 'left == mid': False,\n 'left == right': False,\n 'mid == right': False,\n 'left & mid': set(),\n 'left & right': set(),\n 'mid & right': set(),\n 'left <= mid': False,\n 'mid <= left': False,\n 'left <= right': False,\n 'right <= left': False,\n 'mid <= right': False,\n 'right <= mid': False}),\n\n ({1, 2, 3}, {1, 2}, {1, 3}, {'left == mid == right': False,\n 'left == mid': False,\n 'left == right': False,\n 'mid == right': False,\n 'left & mid': {1, 2},\n 'left & right': {1, 3},\n 'mid & right': {1},\n 'left <= mid': False,\n 'mid <= left': True,\n 'left <= right': False,\n 'right <= left': True,\n 'mid <= right': False,\n 'right <= mid': False}),\n]\n\n\nids = ["left: %s | mid: %s | right: %s => %s" % (left, mid, right, expected) for (left, mid, right, expected) in params]\n\n\n@pytest.mark.parametrize(argnames="left, mid, right, expected",\n argvalues=params,\n ids=ids)\ndef test_get_info_for_3_set(left, mid, right, expected):\n result = get_info_for_3_set(left, mid, right)\n assert result == expected\n","sub_path":"topic_05_data_structure/hw/tests/set_1_get_info_for_3_set_test.py","file_name":"set_1_get_info_for_3_set_test.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"111802933","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n#\n# Using functions as variables\n# Documenting functions\n#\n\n\ndef my_add(a, b):\n \"\"\"Addition of two numbers\"\"\" # this is the function docstring\n print('Function my_add')\n return a + b\n\n\ndef my_mult(a, b):\n \"\"\"Multiplication of two numbers\"\"\"\n print('Function my_mult')\n return a * b\n\n\ndef my_max(a, b):\n \"\"\"The larger of two numbers\"\"\"\n print('Function my_max')\n if a > b:\n return a\n else:\n return b\n\n\ndef my_min(a, b):\n \"\"\"The smaller of two numbers\"\"\"\n print('Function my_min')\n if a < b:\n return a\n else:\n return b\n\n\n# Print the function docstrings\n# Use the functions as variables\n\nprint('\\nUsing functions as variables:')\nprint('-' * 46)\nprint('1. Function my_add\\t:' + my_add.__doc__)\nprint('2. Function my_mult\\t:' + my_mult.__doc__)\nprint('3. Function my_max\\t:' + my_max.__doc__)\nprint('4. Function my_min\\t:' + my_min.__doc__)\nprint('-' * 46)\nnom_func = int(input('Enter the function number\\t: '))\n\n
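# (Editorial note, not in the original: the if/elif dispatch below could also be\n# a dict lookup, e.g. funcs = {1: my_add, 2: my_mult, 3: my_max, 4: my_min} and\n# run = funcs.get(nom_func); that version scales better as functions are added.)\n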
run = 0 # initialised up front so the check below cannot raise a NameError on bad input\nif nom_func == 1:\n run = my_add\nelif nom_func == 2:\n run = my_mult\nelif nom_func == 3:\n run = my_max\nelif nom_func == 4:\n run = my_min\nelse:\n print('Error! Invalid function number!')\n\nif run != 0:\n a = float(input('Enter number A\\t\\t: '))\n b = float(input('Enter number B\\t\\t: '))\n print('-' * 46)\n result = run(a, b)\n print('Result = ' + str(result))\n","sub_path":"Ex19.py","file_name":"Ex19.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"381597927","text":"import os\nimport numpy as np\nimport h5py\nimport threading\nimport Queue\nimport math\n\nshapenet_category_to_id = {\n'airplane'\t: '02691156',\n'bench'\t\t: '02828884',\n'cabinet'\t: '02933112',\n'car'\t\t: '02958343',\n'chair'\t\t: '03001627',\n'lamp'\t\t: '03636649',\n'monitor'\t: '03211117',\n'rifle'\t\t: '04090263',\n'sofa'\t\t: '04256520',\n'speaker'\t: '03691459',\n'table'\t\t: '04379243',\n'telephone'\t: '04401088',\n'vessel'\t: '04530566'\n}\n\nclass DataFetcher(threading.Thread):\n def __init__(self, mode, batch_size = 32, epoch = 10):\n super(DataFetcher, self).__init__()\n self.stopped = False\n self.epoch = epoch\n self.current_epoch = 0\n self.queue = Queue.Queue(2)\n self.batch_size = batch_size\n self.mode = mode\n if self.mode == 'train':\n # self.image_path = '/media/tree/backup/projects/AttentionBased/data/train/image_192_256_12'\n self.image_path = '/media/tree/backup/projects/AttentionBased/data/train/image_256_256_12'\n self.point_path = '/media/tree/backup/projects/AttentionBased/data/train/point_16384_12'\n # self.point_path = '/media/tree/backup/projects/AttentionBased/data/train/point_1024_12'\n else:\n # self.image_path = '/media/tree/backup/projects/AttentionBased/data/test/image_192_256_12'\n self.image_path = '/media/tree/backup/projects/AttentionBased/data/test/image_256_256_12'\n self.point_path = '/media/tree/backup/projects/AttentionBased/data/test/point_16384_12'\n # self.point_path = '/media/tree/backup/projects/AttentionBased/data/test/point_1024_12'\n self.iter, self.cats_batches = self.calculate_cat_batch_number()\n \n def calculate_cat_batch_number(self):\n count = 0\n cats = shapenet_category_to_id.values()\n cat_batch_number = []\n for cat in cats:\n with h5py.File(os.path.join(self.image_path, '{}.h5'.format(cat)), 'r') as f:\n batch_number = f['image'].shape[0] / self.batch_size\n cat_batch_number.append(batch_number)\n count += batch_number\n cats_batches = dict(zip(cats, cat_batch_number))\n print(cats_batches)\n return count, cats_batches\n\n def run(self):\n if self.mode == 'train':\n while self.current_epoch < 
self.epoch:\n for cat, batch in self.cats_batches.iteritems():\n with h5py.File(os.path.join(self.image_path, '{}.h5'.format(cat)), 'r') as fi:\n with h5py.File(os.path.join(self.point_path, '{}.h5'.format(cat)), 'r') as fp:\n for i in range(0, batch * self.batch_size, self.batch_size):\n if self.stopped:\n break\n self.queue.put((fi['image'][i:i+self.batch_size].astype('float32') / 255.0, fp['point'][i:i+self.batch_size]))\n self.current_epoch += 1\n elif self.mode == 'predict':\n # for cat, batch in self.cats_batches.iteritems():\n # with h5py.File(os.path.join(self.image_path, '{}.h5'.format(cat)), 'r') as fi:\n # with h5py.File(os.path.join(self.point_path, '{}.h5'.format(cat)), 'r') as fp:\n # for i in range(0, batch * self.batch_size, self.batch_size):\n # if self.stopped:\n # break \n # self.queue.put((fi['point'][i:i+self.batch_size], fp['point'][i:i+self.batch_size]))\n\n cat = shapenet_category_to_id['chair']\n batch = self.cats_batches[cat]\n with h5py.File(os.path.join(self.image_path, '{}.h5'.format(cat)), 'r') as fi:\n with h5py.File(os.path.join(self.point_path, '{}.h5'.format(cat)), 'r') as fp:\n for i in range(300, batch * self.batch_size, self.batch_size):\n if self.stopped:\n break \n self.queue.put((fi['image'][i:i+self.batch_size].astype('float32') / 255.0, fp['point'][i:i+self.batch_size]))\n \n \n else:\n for cat, batch in self.cats_batches.iteritems():\n with h5py.File(os.path.join(self.image_path, '{}.h5'.format(cat)), 'r') as fi:\n with h5py.File(os.path.join(self.point_path, '{}.h5'.format(cat)), 'r') as fp:\n for i in range(0, batch * self.batch_size, self.batch_size):\n if self.stopped:\n break \n self.queue.put((fi['image'][i:i+self.batch_size].astype('float32') / 255.0, fp['point'][i:i+self.batch_size]))\n\n def fetch(self):\n if self.stopped:\n return None\n return self.queue.get()\n\t\n def shutdown(self):\n self.stopped = True\n while not self.queue.empty():\n self.queue.get()\n\n\n\nif __name__ == '__main__':\n data = DataFetcher('test',batch_size = 1)\n data.start()\n image, point = data.fetch()\n # current = 0\n\n # #create white background\n # background = np.zeros((256,256,3), dtype = np.uint8)\n # background.fill(255)\n\n # # 1. image (obj rendering) \n # img = image[current, ...] * 255\n # img = img.astype('uint8')\n # img += background\n # img = np.where(img > 255 , img - 255, img)\n # cv2.imwrite('{:0>4}.png'.format(current), img)\n\n # # 3. 
gt_rendering\n # gt_rendering = background\n # X, Y, Z = point.T\n # F = 284\n # h = (-Y)/(-Z)*F + 256/2.0\n # w = X/(-Z)*F + 256/2.0\n # # h = np.minimum(np.maximum(h, 0), 255)\n # # w = np.minimum(np.maximum(w, 0), 255)\n # gt_rendering[np.round(h).astype(int), np.round(w).astype(int), 0] = 0\n # gt_rendering[np.round(h).astype(int), np.round(w).astype(int), 2] = 0\n # cv2.imwrite('{:0>4}.jpg'.format(current), gt_rendering)\n data.shutdown()\n","sub_path":"utils/datafetcher.py","file_name":"datafetcher.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"553947012","text":"import numpy as np\nfrom verification_tools import calc_limits\n\n\n#configs = [{'filter':'clear','disperser':'prism'},\n# {'filter':'f100lp','disperser':'g140h'},\n# {'filter':'f170lp','disperser':'g235m'}]\nconfigs = [{'filter':'f070lp','disperser':'g140h'},\n {'filter':'f100lp','disperser':'g140h'},\n {'filter':'f170lp','disperser':'g235h'},\n {'filter':'f290lp','disperser':'g395h'},\n {'filter':'f070lp','disperser':'g140m'},\n {'filter':'f100lp','disperser':'g140m'},\n {'filter':'f170lp','disperser':'g235m'},\n {'filter':'f290lp','disperser':'g395m'},\n {'filter':'clear','disperser':'prism'}]\n\n#apertures = np.array([0.1*1.7,0.1*1.7,0.1*1.7])\n#idt_fluxes = np.array([1e-4,5e-3,1e-3])\n#skyfacs = np.array([1.,1.,1.])\napertures = np.array([0.105*2,0.105*2,0.105*2,0.105*2,0.105*2,0.105*2,0.105*2,0.105*2,0.105*2])\nidt_fluxes = np.array([1e-2,1e-2,1e-2,1e-2,1e-3,1e-3,1e-3,1e-3,1e-4])\n\nobsmode = {\n 'instrument': 'nirspec',\n 'mode': 'ifu',\n 'filter': 'f070lp',\n 'aperture': 'ifu',\n 'disperser': 'g140h'\n }\nexp_config = {\n 'subarray': 'full',\n 'readout_pattern': 'nrsirs2',\n 'ngroup': 17,\n 'nint': 1,\n 'nexp': 4\n }\nstrategy = {\n 'target_xy': [0.0, 0.0],\n 'method': 'ifunodinscene',\n 'aperture_size': 0.15,\n 'dithers': [{'x':0,'y':0},{'x':1,'y':1}],\n \"units\": \"arcsec\"\n }\n\noutputs_regular, outputs_one = calc_limits.calc_limits(configs,apertures,idt_fluxes,obsmode=obsmode,scanfac=150,\n exp_config=exp_config,strategy=strategy,background='minzodi12')\n\nnp.savez('../../outputs/nirspec_ifu_sensitivity.npz',\n wavelengths=outputs_regular['wavelengths'], sns=outputs_regular['sns'], lim_fluxes=outputs_regular['lim_fluxes'], sat_limits=outputs_regular['sat_limits'], configs=outputs_regular['configs'])\n\nnp.savez('../../outputs/nirspec_ifu_sensitivity_one.npz',\n wavelengths=outputs_one['wavelengths'], sns=outputs_one['sns'], lim_fluxes=outputs_one['lim_fluxes'], sat_limits=outputs_one['sat_limits'], configs=outputs_one['configs'])\n","sub_path":"tests/nirspec/nirspec_sensitivity_ifu.py","file_name":"nirspec_sensitivity_ifu.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598329779","text":"from copy import copy\nimport random\nprev = 1805\ncurr = 2150\nMIN_C = 0\nMAX_C = 4999\ndirection = 1 if curr - prev > 0 else -1\narr = [2069, 1212, 2296, 2800, 544, 1618, 356, 1523, 4965, 3681]\n\n\ndef prt_move_msg(f, t):\n print('move from cylinder {} to cylinder {} '.format(f, t))\n\n\ndef search_min_and_return_index(array):\n min = array[0]\n index = 0\n for i, v in enumerate(array):\n if v < min:\n index = i\n min = v\n return index\n\n\ndef search_index(array, v):\n index = -1\n for i, _v in enumerate(array):\n if v == _v:\n index = i\n return i\n\n\ndef split_by_value(sorted_arr, v):\n lo = []\n hi = []\n for i in 
sorted_arr:\n if i > v:\n hi.append(i)\n elif i < v:\n lo.append(i)\n return lo, hi\n\n\ndef fcfs():\n print(_fcfs(arr, curr))\n\n\ndef _fcfs(_arr, _curr):\n total = 0\n total += abs(_arr[0] - _curr)\n prt_move_msg(_curr, _arr[0])\n for i in range(len(_arr) - 1):\n total += abs(_arr[i] - _arr[i + 1])\n prt_move_msg(_arr[i], _arr[i + 1])\n return total\n\n\ndef sstf():\n total = 0\n pcurr = curr\n _arr = copy(arr)\n for nouse in range(len(_arr)):\n disarr = copy(_arr)\n for i, v in enumerate(_arr):\n disarr[i] = abs(v - pcurr)\n min_index = search_min_and_return_index(disarr)\n total += abs(_arr[min_index] - pcurr)\n prt_move_msg(pcurr, _arr[min_index])\n pcurr = _arr[min_index]\n del _arr[min_index]\n print(total)\n\n\ndef scan():\n total = 0\n parr = sorted(arr)\n pcurr = curr\n lo, hi = split_by_value(parr, curr)\n rev = [0, 1] if direction > 0 else [1, 0]\n hi.sort(reverse=rev[0])\n lo.sort(reverse=rev[1])\n hilo = [hi, lo] if direction == 1 else [lo, hi]\n total += _fcfs(hilo[0], pcurr)\n total += MAX_C - hi[-1]\n prt_move_msg(hi[-1], MAX_C)\n pcurr = MAX_C\n total += _fcfs(hilo[1], pcurr)\n print(total)\n\n\ndef look():\n total = 0\n parr = sorted(arr)\n pcurr = curr\n lo, hi = split_by_value(parr, curr)\n rev = [0, 1] if direction > 0 else [1, 0]\n hi.sort(reverse=rev[0])\n lo.sort(reverse=rev[1])\n hilo = [hi, lo] if direction == 1 else [lo, hi]\n total += _fcfs(hilo[0], pcurr)\n pcurr = hilo[0][-direction]\n total += _fcfs(hilo[1], pcurr)\n print(total)\n\n\ndef cscan():\n total = 0\n parr = sorted(arr)\n pcurr = curr\n lo, hi = split_by_value(parr, curr)\n rev = 0 if direction > 0 else 1\n hi.sort(reverse=rev)\n lo.sort(reverse=rev)\n hilo = [hi, lo] if direction == 1 else [lo, hi]\n total += _fcfs(hilo[0], pcurr)\n total += MAX_C - hi[-1]\n prt_move_msg(hi[-1], MAX_C)\n total += MAX_C - MIN_C\n prt_move_msg(MAX_C, MIN_C)\n pcurr = MIN_C\n total += _fcfs(hilo[1], pcurr)\n print(total)\n\n\ndef clook():\n total = 0\n parr = sorted(arr)\n pcurr = curr\n lo, hi = split_by_value(parr, curr)\n rev = 0 if direction > 0 else 1\n hi.sort(reverse=rev)\n lo.sort(reverse=rev)\n hilo = [hi, lo] if direction == 1 else [lo, hi]\n total += _fcfs(hilo[0], pcurr)\n pcurr = hilo[0][-1]\n total += _fcfs(hilo[1], pcurr)\n print(total)\n\n\ndef main():\n global arr, prev, curr\n while True:\n arr=list()\n prev=int(input('input initial position(0-4999): '))\n curr=int(random.uniform(0,4999))\n arr.append(curr)\n for i in range(999):\n arr.append(int(random.uniform(0,4999)))\n while True:\n s = input('method: ')\n if s == 'fcfs':\n fcfs()\n elif s == 'sstf':\n sstf()\n elif s == 'scan':\n scan()\n elif s == 'look':\n look()\n elif s == 'cscan':\n cscan()\n elif s == 'clook':\n clook()\n elif s == 'exit':\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"py/disk_sched_calc.py","file_name":"disk_sched_calc.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"345432613","text":"#!/usr/bin/env python\n\n\"\"\"\nWatchdog Test Cases modules for unittest\n\n\"\"\"\n\nimport commands\nimport unittest\nimport time\n\nclass TestWatchdog(unittest.TestCase):\n \"\"\" Generic Tests for Watchdog reboot.\n\n Keyword arguments:\n - testname : The name of the test to be executed.\n - device: Optional Watchdog device path.\n - i2cbus : Optional I2C bus number.\n - address : Optional device I2C address (in hexadecimal)\n - register : Optional device I2C register (in hexadecimal)\n - testdescription: Optional test 
description to overwrite the default.\n\n Prerequisite commands:\n - i2c-tools\n\n \"\"\"\n\n def __init__(self, testname, device='', i2cbus='', address='', register='', testdescription=''):\n super(TestWatchdog, self).__init__(testname)\n self.device = device\n self.i2cbus = i2cbus\n self.address = address\n self.register = register\n\n # Overwrite test description\n if testdescription:\n self._testMethodDoc = testdescription\n\n def test_igep0046_watchdog(self):\n \"\"\" Test IGEP0046 Watchdog : Check if device has been rebooted before\n\n Type: Functional\n\n Description:\n Trigger watchdog counter and set a volatile magic number to\n PMIC volatile memory. Finally, parse reboot procedure.\n\n .. warning::\n\n Test is not valid if a Coin cell battery is used.\n Test is only valid for IGEP0046.\n To reduce test procedure time. Add this test at the beginning\n of testsuite.\n \"\"\"\n\n # Parse if IGEP0046 has rebooted before\n retval = commands.getstatusoutput('i2cget -f -y %s %s %s'\n % (self.i2cbus, self.address, self.register))\n self.failUnless(retval[0] == 0, \"failed: Can't execute 'i2cget'\")\n\n if not retval[1] == '0x89':\n # Board need to be rebooted\n retval = commands.getstatusoutput(\"reset > /dev/tty0\")\n self.failUnless(retval[0] == 0, \"failed: Can't execute 'reset > /dev/tty0'\")\n retval = commands.getstatusoutput(\"echo '\\033[37mTest IGEP0046 Watchdog : Board need to reboot to PASS test. Reboot on. WAIT. \\033' > /dev/tty0\")\n self.failUnless(retval[0] == 0, \"failed: Can't execute 'echo'\")\n retval = commands.getstatusoutput(\"echo '\\033[37mTest IGEP0046 Watchdog : If board does not reboot after 20 seconds. Test IGEP0046 Watchdog : FAIL. \\033' > /dev/tty0\")\n self.failUnless(retval[0] == 0, \"failed: Can't execute 'echo'\")\n # Set magic number\n retval = commands.getstatusoutput('i2cset -f -y %s %s %s 0x89'\n % (self.i2cbus, self.address, self.register))\n self.failUnless(retval[0] == 0,\n 'failed: Cannot read at I2C bus %s address and %s register %s'\n % (self.i2cbus, self.address, self.register))\n # Reboot (enable watchdog)\n retval = commands.getstatusoutput('reboot')\n self.failUnless(retval[0] == 0,\n 'failed: Cannot reboot board')\n # Reboot timeout\n time.sleep(20)\n retval = commands.getstatusoutput(\"echo '\\033[37mTest IGEP0046 Watchdog : Reboot FAILED. 
Test cannot be executed \\033' > /dev/tty0\")\n self.failUnless(retval[0] == 0, \"failed: Can't execute 'echo'\")\n self.fail(\"Error, reboot failed\")","sub_path":"igep_qa/tests/qwatchdog.py","file_name":"qwatchdog.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"135989058","text":"import tensorflow as tf\nfrom tensorflow.python.framework.graph_util import convert_variables_to_constants\n\nnum_channels = 1\npatch_size = 2\ndepth = 36\nnum_nodes_layer3 = 512\nnum_nodes_output = 2\ngrid_height = 5\ngrid_width = 5\nparams_dir = 'parameters/location/model.ckpt-9501'\noutput_file = 'graph_location'\n\n# Instantiate graph\ngraph = tf.Graph()\n\n# Instantiate session\nsession = tf.InteractiveSession(graph=graph)\n\n# Reconstruct model\nwith graph.as_default():\n \n # Model input\n tf_x = tf.placeholder(tf.float32, shape=[1, 5, 5, 1], name='input')\n \n # Variables\n w1 = tf.Variable(tf.truncated_normal([patch_size, patch_size, num_channels, depth], stddev=0.1), name='w1', dtype=tf.float32)\n b1 = tf.Variable(tf.zeros([depth]), name='b1', dtype=tf.float32)\n w2 = tf.Variable(tf.truncated_normal([patch_size, patch_size, depth, depth], stddev=0.1), name='w2', dtype=tf.float32)\n b2 = tf.Variable(tf.zeros([depth]), name='b2', dtype=tf.float32)\n w3 = tf.Variable(tf.truncated_normal([grid_height * grid_width * depth, num_nodes_layer3], stddev=0.1), name='w3', dtype=tf.float32)\n b3 = tf.Variable(tf.zeros([num_nodes_layer3]), name='b3', dtype=tf.float32)\n w4 = tf.Variable(tf.truncated_normal([num_nodes_layer3, num_nodes_output], stddev=0.1), name='w4', dtype=tf.float32)\n b4 = tf.Variable(tf.zeros([num_nodes_output]), name='b4', dtype=tf.float32)\n \n # Compute\n # First convolutional layer\n c1 = tf.nn.conv2d(tf_x, w1, strides=[1, 1, 1, 1], padding='SAME', name='c1')\n h1 = tf.nn.relu(c1 + b1, name='h1')\n \n # Second convolutional layer\n c2 = tf.nn.conv2d(h1, w2, strides=[1, 1, 1, 1], padding='SAME', name='c2')\n h2 = tf.nn.relu(c2 + b2, name='h2')\n \n # Reshape for fully connected layer\n h2_shape = tf_x.get_shape().as_list()\n h2_out_vec = tf.reshape(h2, shape=[h2_shape[0], grid_height * grid_width * depth], name='h2_out_vec')\n \n # First fully connected layer\n a3 = tf.add(tf.matmul(h2_out_vec, w3), b3, name='a3')\n h3 = tf.nn.relu(a3, name='h3')\n \n # Model output\n output = tf.add(tf.matmul(h3, w4), b4, name='output')\n \n # Restore saved model params\n var_dict = {'w1': w1,\n 'b1': b1,\n 'w2': w2,\n 'b2': b2,\n 'w3': w3,\n 'b3': b3,\n 'w4': w4,\n 'b4': b4\n }\n saver = tf.train.Saver(var_dict)\n init = tf.initialize_all_variables()\n session.run(init)\n saver.restore(session, params_dir)\n\n# Serialize graph for c++\nfrozen_graph = convert_variables_to_constants(session, session.graph_def, ['output'])\ntf.train.write_graph(frozen_graph, '.', output_file + '.pb', as_text=False)\ntf.train.write_graph(frozen_graph, '.', output_file + '.txt', as_text=True)\n","sub_path":"model/model_def_location.py","file_name":"model_def_location.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"512028084","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('author', models.CharField(max_length=100, verbose_name='\\u4f5c\\u8005')),\n ('title', models.CharField(max_length=100, verbose_name='\\u6807\\u9898')),\n ('alias', models.CharField(max_length=100, blank=True, help_text='\\u505a\\u4f2a\\u9759\\u6001url\\u7528', null=True, verbose_name='\\u82f1\\u6587\\u6807\\u9898', db_index=True)),\n ('is_top', models.BooleanField(default=False, verbose_name=b'\\xe7\\xbd\\xae\\xe9\\xa1\\xb6')),\n ('summary', models.TextField(verbose_name='\\u6458\\u8981')),\n ('content', models.TextField(verbose_name='\\u6587\\u7ae0\\u6b63\\u6587rst\\u683c\\u5f0f')),\n ('content_html', models.TextField(verbose_name='\\u6587\\u7ae0\\u6b63\\u6587html\\u683c\\u5f0f')),\n ('view_times', models.IntegerField(default=1)),\n ('tags', models.CharField(help_text='\\u7528\\u82f1\\u6587\\u9017\\u53f7\\u5206\\u5272', max_length=100, null=True, verbose_name='\\u6807\\u7b7e', blank=True)),\n ('status', models.IntegerField(default=0, verbose_name='\\u72b6\\u6001', choices=[(0, '\\u6b63\\u5e38'), (1, '\\u8349\\u7a3f'), (2, '\\u5220\\u9664')])),\n ('is_old', models.BooleanField(default=False, verbose_name='\\u662f\\u5426\\u4e3a\\u65e7\\u6570\\u636e')),\n ('pub_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='\\u53d1\\u5e03\\u65f6\\u95f4')),\n ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\\u521b\\u5efa\\u65f6\\u95f4')),\n ('update_time', models.DateTimeField(auto_now=True, verbose_name='\\u66f4\\u65b0\\u65f6\\u95f4')),\n ],\n options={\n 'ordering': ['-is_top', '-pub_time', '-create_time'],\n 'verbose_name': '\\u6587\\u7ae0',\n 'verbose_name_plural': '\\u6587\\u7ae0',\n },\n ),\n ]\n","sub_path":"liushuizhang_site/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"638896179","text":"\"\"\" \n product_styles.view test\n\"\"\"\n\nimport json\nimport os\nimport sys\nimport unittest\n#import mock\n\nsys.path.insert(0, os.path.abspath(os.path.dirname(\n os.path.realpath(__file__)) + '../../../../../../../'))\nsys.path.insert(0, os.path.abspath(os.path.dirname(\n os.path.realpath(__file__)) + '/../../../../../../lib/'))\nsys.path.insert(0, os.path.abspath(os.path.dirname(\n os.path.realpath(__file__)) + '/../../../../../../conf/'))\n\nfrom inspired_config import SQLALCHEMY_DATABASE_URI\nTEST_URI = SQLALCHEMY_DATABASE_URI + '_test'\n\n#from sqlalchemy import create_engine\n#from sqlalchemy.orm import scoped_session, sessionmaker\n#from sqlalchemy.pool import StaticPool\n\nfrom database import Base, init_engine, db_session, init_models\nfrom inspired.v1.lib.ref_product_styles.models import RefProductStyle\nfrom inspired.v1.api.main import create_app\n\nclass ProductsApiTestCase(unittest.TestCase):\n \"\"\"Tests for the API /v1/products methods\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Bootstrap test environment by creating the db engine and app \"\"\"\n init_models()\n cls.app = create_app(TEST_URI)\n cls.app.config['TESTING'] = True\n cls.app.config['CSRF_ENABLED'] = False\n cls.client = cls.app.test_client()\n #cls.engine = create_engine('sqlite:///:memory:',\n #connect_args={'check_same_thread':False},\n #poolclass=StaticPool)\n cls._ctx = cls.app.test_request_context()\n cls._ctx.push()\n cls.engine = init_engine(TEST_URI)\n cls.connection = cls.engine.connect()\n cls.db_session = 
db_session\n Base.query = cls.db_session.query_property()\n Base.metadata.create_all(cls.engine)\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Delete the test schema and connection \"\"\"\n Base.metadata.drop_all(cls.engine)\n cls.db_session.close()\n\n def setUp(self):\n \"\"\" use subsessions and do a rollback after each test. \"\"\"\n self._ctx = self.app.test_request_context()\n self._ctx.push()\n self.db_session.begin(subtransactions=True)\n\n def tearDown(self):\n \"\"\" use subsessions and do a rollback after each test. \"\"\"\n self.db_session.rollback()\n self.db_session.close()\n self.engine.dispose()\n ## need to clear the table and auto-increment counter\n self.connection.execute(\"TRUNCATE ref_product_styles\")\n self._ctx.pop() \n\n\n def test_check_if_product_style_exists(self):\n \"\"\" testing checking if a product_style exists \"\"\"\n name = 'abc'\n args = {\n 'name': name,\n }\n product_style = RefProductStyle(**args)\n self.db_session.add(product_style)\n self.db_session.commit()\n response = self.client.get('/api/v1/product_styles/%i' % product_style.id)\n self.assertEquals(response.status_code, 200)\n self.assertEquals(response.headers['Content-Type'], 'application/json')\n self.assertTrue(json.loads(response.data)['success'])\n for var in ['name']:\n self.assertEquals(json.loads(response.data)['data'][var], \n locals()[var])\n self.db_session.delete(product_style)\n self.assertEqual(self.db_session.commit(), None)\n\n\n def test_add_one_product_style(self):\n \"\"\" testing adding a product_style \"\"\"\n name = 'abc'\n args = {\n 'name': name,\n }\n response = self.client.post('/api/v1/product_styles', data=json.dumps(\n args), content_type='application/json')\n self.assertEquals(response.headers['Content-Type'], 'application/json')\n self.assertEquals(response.status_code, 201)\n self.assertTrue(json.loads(response.data)['success'])\n self.assertEquals(json.loads(response.data)['data']['id'], 1)\n\n\n def test_add_two_product_styles(self):\n \"\"\" testing adding two product_styles \"\"\"\n name = 'abc'\n args = {\n 'name': name,\n }\n name = 'xyz'\n args2 = {\n 'name': name,\n }\n for id, values in enumerate([args, args2], 1):\n response = self.client.post('/api/v1/product_styles', \n data=json.dumps(values), \n content_type='application/json')\n self.assertEquals(response.headers['Content-Type'], \n 'application/json')\n self.assertEquals(response.status_code, 201)\n self.assertTrue(json.loads(response.data)['success'])\n self.assertEquals(json.loads(response.data)['data']['id'], id)\n\n\n def test_add_two_product_styles_same_name(self):\n \"\"\" testing adding two product_styles with same name \"\"\"\n name = 'abc'\n args = {\n 'name': name,\n }\n args2 = {\n 'name': name,\n }\n for id, values in enumerate([args, args2], 1):\n response = self.client.post('/api/v1/product_styles', \n data=json.dumps(values), \n content_type='application/json')\n self.assertEquals(response.headers['Content-Type'], \n 'application/json')\n if id == 1:\n self.assertEquals(response.status_code, 201)\n self.assertEquals(json.loads(response.data)['data']['id'], id)\n self.assertTrue(json.loads(response.data)['success'])\n else:\n self.assertEquals(response.status_code, 409)\n self.assertFalse(json.loads(response.data)['success'])\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","sub_path":"tests/lib_tests/inspired_tests/v1_tests/api_tests/product_styles_tests/test_view.py","file_name":"test_view.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"538452086","text":"from manimlib.imports import *\n\nclass OpeningScene(Scene):\n def construct(self):\n text0 = TextMobject(r'Perfect Number')\n self.play(Write(text0))\n self.wait()\n\nclass Illustration(Scene):\n def showtextthenfade(self, textmobject, time):\n self.play(Write(textmobject))\n self.wait(duration=time)\n self.play(FadeOut(textmobject))\n\n def construct(self):\n text0 = TextMobject(r'What is a perfect number?')\n self.showtextthenfade(text0, 3)\n text1 = TextMobject(r'A perfect number is a number that is exactly\\\\the sum of all of its proper divisors\\\\(the divisors other than the number itself)')\n self.showtextthenfade(text1, 3)\n # '''\n text2 = TextMobject(r'For example:\\\\ $6 = 1 \\times 2 \\times 3$ \\\\ $6 = 1 + 2 + 3$')\n self.showtextthenfade(text2, 3)\n text3 = TextMobject(r'28 has the divisors: ', r'1', r', ', r'2',r',', r'4', r', ', r'7', r', ', r'14')\n text4 = TexMobject(r'28=', r'1', r'+ ', r'2', r'+ ', r'4', r'+ ', r'7', r'+ ', r'14').next_to(text3, DOWN)\n self.play(Write(text3))\n self.play(\n Write(text4[0]), Write(text4[2]), Write(text4[4]), Write(text4[6]), Write(text4[8]),\n ReplacementTransform(text3[1], text4[1]),\n ReplacementTransform(text3[3], text4[3]),\n ReplacementTransform(text3[5], text4[5]),\n ReplacementTransform(text3[7], text4[7]),\n ReplacementTransform(text3[9], text4[9]),\n )\n self.play(FadeOut(text3), FadeOut(text4))\n text5 = TextMobject(r'Adding up every divisor except the number itself\\\\is also called the aliquot sum')\n text6 = TextMobject(r'It can also be written as the equation\\\\ $\\sigma _1 (n)=2n$ (the $\\sigma$ function sums all divisors, the number itself included)')\n self.showtextthenfade(text5, 3)\n self.showtextthenfade(text6, 3)\n # '''\n\nclass PerfectNumbers(Illustration):\n def construct(self):\n text0 = TextMobject(r'So, apart from 6 and 28, which perfect numbers have been discovered?')\n self.showtextthenfade(text0, 3)\n text1 = TextMobject(r'6\\\\28\\\\496\\\\8128\\\\33550336\\\\8589869056\\\\137438691328\\\\2305843008139952128\\\\',\n r'2658455991569831744654692615953842176\\\\191561942608236107294793378084303638130997321548169216\\\\',\n r'....')\n self.showtextthenfade(text1, 3)\n text2 = TextMobject(r'These are the 10 numbers I found on OEIS (Sequence A000396),\\\\and the last one is already quite large.')\n self.showtextthenfade(text2, 3)\n text3 = TextMobject('You can scroll back and check whether these numbers follow any pattern')\n self.showtextthenfade(text3, 1)\n\nclass Patten(Illustration):\n\n def construct(self):\n text0 = TextMobject(r'Look at the sequence again, and watch how the numbers end')\n self.showtextthenfade(text0, 3)\n text1 = TextMobject(r'6\\\\',r'28\\\\',r'49',r'6\\\\',r'81', r'28\\\\',r'3355033', r'6\\\\',r'858986905', r'6\\\\',r'1374386913', r'28\\\\',r'23058430081399521', r'28\\\\',\n r'265845599156983174465469261595384217', r'6\\\\',r'19156194260823610729479337808430363813099732154816921', r'6\\\\',\n r'....')\n text_6 = TextMobject(r'6')\n text_28 = TextMobject(r'28')\n colorful = [text1[0], text1[1], text1[3], text1[5], text1[7], text1[9], text1[11], text1[13], text1[15], text1[17]]\n for each in colorful:\n each.set_color(BLUE)\n self.play(Write(text1), run_time=2)\n self.wait(2)\n self.play(FadeOut(text1))\n text2 = TextMobject(r'So, did you notice? Every perfect number found so far ends in 6 or 28!')\n text3 = TextMobject(r'Even more interesting: for each of them, the reciprocals of all divisors except 1 sum to 1')\n 
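# (Editorial note, not in the original script: this follows from sigma(n) = 2n;\n # dividing both sides by n gives sum(1/d for d in divisors(n)) == 2, and\n # dropping the d == 1 term leaves exactly 1, as the three examples below show.)\n 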
text4 = TextMobject(r'6-->$1 = {{1}\\over{2}} + {{1}\\over{3}} + {{1}\\over{6}}$')\n text5 = TextMobject(r'28-->$1 = {{1}\\over {2}}+{{1}\\over {4}}+{{1}\\over {7}} + {{1}\\over {14}}+{{1}\\over {28}}$')\n text6 = TextMobject(r'496-->$1 = {{1}\\over {2}}+{{1}\\over {4}}+{{1}\\over {8}}+{{1}\\over {16}}+{{1}\\over {31}}+{{1}\\over {62}}+{{1}\\over {124}}+{{1}\\over {248}}+{{1}\\over {496}}$\\\\....')\n self.showtextthenfade(text2, 3)\n self.showtextthenfade(text3, 3)\n self.play(\n Write(text4.to_edge(UP)),\n Write(text5.next_to(text4, DOWN)),\n Write(text6.next_to(text5, DOWN)),\n )\n self.play(Write(TextMobject(r'Beyond that, perfect numbers have many other remarkable properties').next_to(text6, DOWN)))\n\nclass TriangularNumbers(Illustration):\n def construct(self):\n text0 = TextMobject(r'1. Every perfect number is a triangular number')\n text1 = TextMobject(r'What is a triangular number?').next_to(text0, DOWN)\n self.play(Write(text0))\n self.wait(2)\n self.play(Write(text1))\n self.wait(2)\n self.play(\n text1.to_corner, UL,\n FadeOutAndShiftDown(text0)\n )\n text2 = TexMobject(r'1').move_to(np.array([0,-3,0]))\n text3 = TexMobject(r'3').move_to(np.array([0,-3,0]))\n text4 = TexMobject(r'6').move_to(np.array([0,-3,0]))\n text5 = TexMobject(r'10').move_to(np.array([0,-3,0]))\n text6 = TexMobject(r'....').move_to(np.array([0,-3,0]))\n self.play(Write(text2))\n c1 = Circle().shift(UP).set_color(BLUE).set_fill(opacity=100).scale(0.3)\n self.play(ShowCreation(c1))\n self.wait(2)\n c2 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c1, DOWN, buff=SMALL_BUFF).shift(LEFT*0.5)\n c3 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c1, DOWN, buff=SMALL_BUFF).shift(RIGHT*0.5)\n self.play(\n ShowCreation(c2),\n ShowCreation(c3),\n Transform(text2, text3),\n )\n self.wait(2)\n c4 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c3, DOWN, buff=SMALL_BUFF).shift(LEFT*0.5)\n c5 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c3, DOWN, buff=SMALL_BUFF).shift(RIGHT*0.5)\n c6 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c2, DOWN, buff=SMALL_BUFF).shift(LEFT*0.5)\n self.play(\n ShowCreation(c4),\n ShowCreation(c5),\n ShowCreation(c6),\n Transform(text2, text4)\n )\n self.wait(2)\n c7 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c6, DOWN, buff=SMALL_BUFF).shift(LEFT*0.5)\n c8 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c6, DOWN, buff=SMALL_BUFF).shift(RIGHT*0.5)\n c9 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c5, DOWN, buff=SMALL_BUFF).shift(LEFT*0.5)\n c10 = Circle().set_color(BLUE).set_fill(opacity=100).scale(0.3).next_to(c5, DOWN, buff=SMALL_BUFF).shift(RIGHT*0.5)\n self.play(\n ShowCreation(c7),\n ShowCreation(c8),\n ShowCreation(c9),\n ShowCreation(c10),\n Transform(text2, text5)\n )\n self.wait(2)\n text7 = TexMobject(r'....').next_to(text6, UP*2).shift(UP*0.5)\n self.play(ShowCreation(text7))\n self.play(Transform(text2, text6))\n self.wait(3)\n self.play(\n FadeOut(text2),\n FadeOut(text7),\n FadeOut(VGroup(c1,c2,c3,c4,c5,c6,c7,c8,c9,c10))\n )\n text8 = TextMobject(r'6=1+2+3\\\\28=1+2+3+...+6+7\\\\496=1+2+3+...+30+31\\\\8128=1+2+3+...+126+127\\\\33550336=1+2+3+...+8190+8191\\\\....')\n self.play(Write(text8))\n\nclass OtherPattens(Illustration):\n def construct(self):\n text0 = TextMobject(r'2. Every perfect number except 6 can be written as a sum of consecutive odd cubes')\n self.showtextthenfade(text0, 2)\n text1 = TextMobject(r'$28=1^3+3^3$\\\\$496=1^3+3^3+5^3$\\\\$8128=1^3+3^3+...+15^3$\\\\$33550336=1^3+3^3+...+127^3$\\\\$...$')\n self.play(Write(text1))\n self.play(FadeOut(text1))\n text2 = TextMobject(r'3. Each one can be written as a sum of consecutive powers of 2')\n text3 = TextMobject(r'$6=2^1+2^2$\\\\$28=2^2+2^3+2^4$\\\\$496=2^4+2^5+...+2^8$\\\\$8128=2^6+2^7+...+{2^{11}}+{2^{12}}$\\\\$33550336={2^{12}}+{2^{13}}+...+{2^{23}}+{2^{24}}$\\\\$...$')\n self.showtextthenfade(text2, 2)\n 
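# (Editorial note, hypothetical check, not in the original script: each line of\n # text3 is 2^(p-1)*(2^p - 1) expanded as 2^(p-1) + ... + 2^(2p-2); for instance\n # sum(2**k for k in range(1, 3)) == 6 and sum(2**k for k in range(4, 9)) == 496.)\n 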
self.showtextthenfade(text3, 3)\n text4 = TextMobject(r'4. Because of property 3, their binary representations look like this')\n text5 = TextMobject(r'${6_{10}}=110_2$\\\\${28_{10}}=11100_2$\\\\${496_{10}}=111110000_2$\\\\${8128_{10}}=1111111000000_2$\\\\...')\n self.showtextthenfade(text4, 2)\n self.showtextthenfade(text5, 3)\n text5 = TextMobject(r'5. Work this one out for yourself')\n text6 = TextMobject(r'$2$',r'$8$').scale(2)\n self.play(Write(text6))\n self.wait()\n text7 = TextMobject(r'2',r'+',r'8',r'=',r'1',r'0').scale(2)\n self.play(\n Transform(text6[0], text7[0]),\n Transform(text6[1], text7[2]),\n Write(text7[1]),\n Write(text7[3]),\n )\n self.wait()\n self.play(\n Write(text7[4]),\n Write(text7[5])\n )\n self.wait()\n self.play(\n FadeOut(text6),\n FadeOut(text7[1]),\n FadeOut(text7[3]),\n )\n self.wait()\n text8 = TextMobject(r'1',r'+',r'0',r'=',r'1').scale(2)\n self.play(\n text7[4].move_to, text8[0].get_center(),\n text7[5].move_to, text8[2].get_center(),\n Write(text8[1]),\n Write(text8[3])\n )\n self.wait()\n self.play(Write(text8[4]))\n self.wait()\n self.play(\n FadeOut(VGroup(text8,text7[4],text7[5]))\n )\n self.wait()\n text9 = TextMobject(r'4',r'9',r'6').scale(2)\n text10 = TextMobject(r'4',r'+',r'9',r'+',r'6',r'=',r'1',r'9').scale(2)\n text11 = TextMobject(r'1',r'+',r'9',r'=',r'1',r'0').scale(2)\n text12 = TextMobject(r'1',r'+',r'0',r'=',r'1').scale(2)\n self.play(ShowCreation(text9))\n self.wait()\n self.play(\n Transform(text9[0],text10[0]),\n Transform(text9[1],text10[2]),\n Transform(text9[2],text10[4]),\n Write(VGroup(text10[1],text10[3],text10[5],text10[6],text10[7]))\n )\n self.wait()\n self.play(\n FadeOut(text9),\n FadeOut(VGroup(text10[1],text10[3],text10[5])),\n Transform(text10[6],text11[0]),\n Transform(text10[7],text11[2]),\n )\n self.wait()\n self.play(\n Write(VGroup(text11[1],text11[3],text11[4],text11[5]))\n )\n self.wait()\n self.play(\n FadeOut(VGroup(text10[6],text10[7])),\n text11[4].move_to,text10[6].get_center(),\n text11[5].move_to,text10[7].get_center(),\n Write(text12[4])\n )\n self.wait()\n\nclass HowToFind(Illustration):\n def construct(self):\n text0 = TextMobject(r'How do scientists search for perfect numbers?\\\\Is there a rule behind them?')\n self.showtextthenfade(text0, 2)\n img = ImageMobject(r'.\\Imgs\\Euklid-von-Alexandria.jpg').scale(3).to_edge(LEFT)\n text2 = TextMobject(r'Euclid of Alexandria').next_to(img,DOWN, aligned_edge=LEFT)\n self.play(FadeIn(img))\n self.play(Write(text2))\n text1 = TextMobject(r'The story starts with Euclid').next_to(img, RIGHT)\n self.play(\n Write(text1),\n text1.shift, UP*3\n )\n text3 = TextMobject(r'Euclid-Euler Theorem:\\\\${2^{p-1}}M_p$ is a perfect number ($p$ prime)').next_to(text1,DOWN)\n self.play(Write(text3), run_time=2)\n self.wait()\n text4 = TextMobject(r'$M_p$ is the famous\\\\Mersenne prime:\\\\${M_p}={2^p}-1$ ($p$ prime)').next_to(text3,DOWN)\n self.play(Write(text4), run_time=2)\n self.wait()\n text5 = TextMobject(r'Below is the proof due to Euler,\\\\given that\\\\$\\sigma (ab)=\\sigma (a)\\cdot \\sigma (b)$ ($\\sigma$ is multiplicative)').next_to(text4, DOWN)\n text6 = TextMobject(r'$\\sigma (2^{p-1}({2^p}-1))=\\sigma (2^{p-1}) \\sigma ({2^p}-1)$\\\\',r'$=({2^p}-1)(2^p)$\\\\',r'$=2(2^{p-1})({2^p}-1)$').next_to(text4, DOWN).shift(RIGHT)\n text6[1].next_to(text6[0], DOWN, aligned_edge=LEFT)\n text6[2].next_to(text6[1], DOWN, aligned_edge=LEFT)\n text7 = TextMobject(r'Q.E.D.').next_to(text6, DOWN, aligned_edge=LEFT)\n self.showtextthenfade(text5, 2)\n self.play(Write(text6), run_time=2)\n self.wait()\n self.play(Write(text7))\n self.play(\n FadeOut(VGroup(text1,text2 , text3, text4, text6, text7))\n )\n self.play(FadeOut(img))\n text8 = TextMobject(r'Let us check a few values:\\\\',r'$p=2, 2^{p-1}({2^p}-1)=6$\\\\',r'$p=3, 2^{p-1}({2^p}-1)=28$\\\\',r'$p=5, 2^{p-1}({2^p}-1)=496$\\\\',r'$p=7, 2^{p-1}({2^p}-1)=8128$\\\\',r'...')\n 
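# (Editorial note, hypothetical check, not in the original script:\n # [2**(p-1)*(2**p-1) for p in (2, 3, 5, 7)] evaluates to [6, 28, 496, 8128],\n # matching the four lines of text8 above.)\n 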
text8[1].next_to(text8[0], DOWN, aligned_edge=LEFT)\n text8[2].next_to(text8[1], DOWN, aligned_edge=LEFT)\n text8[3].next_to(text8[2], DOWN, aligned_edge=LEFT)\n text8[4].next_to(text8[3], DOWN, aligned_edge=LEFT)\n text8[5].next_to(text8[4], DOWN, aligned_edge=LEFT)\n self.showtextthenfade(text8,3)\n text9 = TextMobject(r'However, this only proves that\\\\$2^{p-1}(2^{p}-1)$ is a perfect number when $p$ is prime;\\\\it does not by itself cover all perfect numbers')\n text10 = TextMobject(r'The real search still takes a huge amount of computation and high-performance computers...')\n self.showtextthenfade(text9, 3)\n self.showtextthenfade(text10, 3)\n\nclass EndingScene(Scene):\n def construct(self):\n text = TextMobject(r'Tools:\\\\Python3.7.1(Anaconda)\\\\manim(by 3Blue1Brown)\\\\Pr\\\\BGM: The Girl with the Flaxen Hair by Claude Debussy\\\\Reference:\\\\Wikipedia-Perfect Number')\n self.play(Write(text))\n self.wait(3)\n self.play(FadeOut(text))","sub_path":"5_perfect_number.py","file_name":"5_perfect_number.py","file_ext":"py","file_size_in_byte":13709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"60806724","text":"\r\nimport jieba\r\nimport numpy as np\r\nimport difflib\r\nimport requests\r\n\r\nfrom pymongo import MongoClient\r\n\r\n\r\nconn = MongoClient('127.0.0.1',27017)\r\ndb = conn.chatroom\r\nchat = db.chat\r\ndbname2 = db.dbname2\r\n# count = list(QA.find())\r\n\r\n\r\n\r\n\r\n\r\ndef Get_Qlist(lst):\r\n ques = []\r\n for i in lst:\r\n ques.append(i['qs'])\r\n return ques\r\n\r\n\r\n\r\n\r\n\r\n\r\n# build the stopword list\r\ndef stopwordlist(filepath):\r\n stopwords = [line.strip() for line in open(filepath,'r',encoding='utf-8').readlines()]\r\n return stopwords # return the whole list so membership tests see every stopword\r\n\r\n# tokenize the sentence with stopwords removed, in preparation for similarity scoring\r\ndef seg_sentence(sentence):\r\n question = {}\r\n sentence_seged = jieba.cut(sentence.strip())\r\n stopwords = stopwordlist(r'D:/stopword.txt')\r\n outstr = ''\r\n for word in sentence_seged:\r\n if word not in stopwords:\r\n if word != '\\t':\r\n outstr += word\r\n outstr += ''\r\n question[outstr] = sentence\r\n return question\r\n\r\n\r\n\r\n# score the similarity between the user question and each question in the QA base\r\ndef cal_similiar(question,sen):\r\n str_data = {}\r\n length = len(sen)\r\n median_list = {}\r\n for item in question.keys(): # compensate for similarity bias caused by differing string lengths\r\n num = np.median([len(item),length])\r\n if abs(length - num) != 0:\r\n xx = (abs(length - num)) * 0.009 # a hand-picked weight\r\n else:\r\n xx = 0\r\n median_list[item] = xx # xx is the length-penalty offset\r\n\r\n for k,v in median_list.items(): # recompute the similarity, mapping each stored question to its score against the user question\r\n fraction = difflib.SequenceMatcher(None,sen,k).quick_ratio()-v\r\n s = question[k]\r\n str_data[s] = fraction # sentence similarity\r\n\r\n return str_data\r\n\r\n\r\n# match a question in the QA base using the computed similarity\r\ndef matching_question(lst,sen):\r\n px = {}\r\n for i in lst:\r\n question = seg_sentence(i) # tokenize via the stopword helper; returns a dict\r\n str_data = cal_similiar(question,sen) # score the similarity against the dict returned above\r\n for k,v in str_data.items():\r\n px[i] = v\r\n tupl_data = sorted(px.items(),key=lambda x:x[1], reverse=True)\r\n if tupl_data and tupl_data[0][1] > 0.3: # the list is sorted, so only the best score matters\r\n return tupl_data[0][0]\r\n return sen\r\n\r\n# qustion = matching_question(ques,que1)\r\ndef Get_Ans(qu,qustion):\r\n for i in qu:\r\n\r\n if i['qs'] == qustion:\r\n i['count'] += 1\r\n chat.update({'qs': qustion}, {'$set': {'count': i['count']}})\r\n return i['an']\r\n return False # report failure only after checking every stored question\r\ndef Save_Ques(no_q,no_qustion):\r\n if no_qustion not in Get_Qlist(no_q):\r\n dbname2.insert({'qs':no_qustion,'an':'', 'count': 1})\r\n else:\r\n for i in no_q:\r\n if i['qs'] == no_qustion:\r\n i['count'] += 1\r\n 
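# (Editorial note: this read-modify-write can race when several clients update\r\n # at once; MongoDB's atomic {'$inc': {'count': 1}} update operator avoids that.)\r\n 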
dbname2.update({'qs': no_qustion}, {'$set': {'count': i['count']}})\r\n return 'This question is too hard, I have not learned it yet. I will keep studying, promise!'\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"similar.py","file_name":"similar.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"413014409","text":"# A Fibonacci-style search function written with plain recursion.\n# It returns the number at position n, with positions starting from 1.\n# This version is considered inefficient because each call recurses twice.\n\ndef fib(n):\n    global numFibCalls\n    numFibCalls += 1\n    if n == 1:\n        return 1\n    elif n == 2:\n        return 2\n    else:\n        return fib(n-1) + fib(n-2)\n\nnumFibCalls = 0\nprint(fib(35))\nprint("called", numFibCalls, "times")\n# it repeatedly computes fib(3), fib(2), and fib(1)\n# use a dict to avoid the repeated work\n\ndef fib_e(n, d):\n    global numFibCalls\n    numFibCalls += 1\n\n    if n in d:\n        return d[n]\n    else:\n        ans = fib_e(n-1, d) + fib_e(n-2, d)\n        d[n] = ans # cache the computed result for n > 2 in the dict\n        return ans # this caching technique is called memoization\n\n# seed the dict with the base cases\nnumFibCalls = 0\nd = {1: 1, 2: 2}\nprint(fib_e(35, d)) # the dict grows during the run, so the calculation gets faster\nprint("called", numFibCalls, "times")\n","sub_path":"ProgrammingCourses/MIT6001X/week3/dict_fib_application.py","file_name":"dict_fib_application.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"276902529","text":"import vrep\nimport time\nimport math\n\n# Vrep connection\nvrep.simxFinish(-1) # close any communication that may still be open\nclientID = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)\n\n\nif clientID != -1:\n    print("Connection succeeded")\n    returnCode2, handle2 = vrep.simxGetObjectHandle(clientID, 'div_join_1', vrep.simx_opmode_oneshot_wait)\n    print(handle2)\n    actualPos = vrep.simxGetJointPosition(clientID, handle2,vrep.simx_opmode_oneshot)\n    print(actualPos)\n    actualPos = vrep.simxSetJointTargetVelocity(clientID,handle2,0.5,vrep.simx_opmode_oneshot_wait)\n    actualPos = vrep.simxGetJointPosition(clientID, handle2, vrep.simx_opmode_oneshot)\n    print(actualPos)\n\n\n\n\nelse:\n    print("Connection failed")","sub_path":"SIMULACION_Vrep/PROGRAMACION_Vrep/PruebaProgVrep/PruebaPrograma/Programa.py","file_name":"Programa.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123044064","text":"from __future__ import print_function\nimport copy\n\nfrom sklearn.metrics import precision_recall_curve, precision_recall_fscore_support\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\n\n\"\"\"\nHere we have some useful functions for inference and evaluation\n\nparams: \n1. results: should be the results from a topic model (or a cluster based topic model),\n each line refers to a sample's scores for all topics(clusters)\n\n2. 
interesting_topic_ids: a list of relevant topic (cluster) ids; these should be labeled manually\n\"\"\"\n\n\ndef read_label(raw_label, level=1):\n    # Binarize graded labels: grades 1..level count as positive, higher grades as negative.\n    label = copy.copy(raw_label)\n    for i in range(len(label)):\n        if label[i] > 0 and label[i] <= level:\n            label[i] = 1\n        elif label[i] > level:\n            label[i] = 0\n    return label\n\n\ndef _parse_model_results(results):\n    # results is the output of a pipeline model\n    max_topic_id = np.argmax(results, axis=1)\n    max_topic_score = np.max(results, axis=1)\n    return max_topic_id, max_topic_score\n\ndef _predict_score(max_topic_id, max_topic_score, interesting_topic_ids):\n    scores = []\n    for tid, tscore in zip(max_topic_id, max_topic_score):\n        if tid in interesting_topic_ids:\n            scores.append(tscore)\n        else:\n            scores.append(0)\n    return np.array(scores)\n\ndef _predict(scores, threshold):\n    # use a universal threshold to predict\n    pred = scores > threshold\n    return np.int64(pred)\n\ndef _predict_topic_specific(max_topic_id, scores, threshold_ts):\n    # use topic-specific thresholds to predict\n    # threshold_ts: a dict containing each topic's threshold\n    pred = []\n    for tid, score in zip(max_topic_id, scores):\n        if tid not in threshold_ts:\n            pred.append(0)\n        elif score > threshold_ts[tid]:\n            pred.append(1)\n        else:\n            pred.append(0)\n    return np.array(pred)\n\ndef predict_score(results, interesting_topic_ids):\n    max_topic_id, max_topic_score = _parse_model_results(results)\n    return _predict_score(max_topic_id, max_topic_score, interesting_topic_ids)\n\ndef predict(results, interesting_topic_ids, threshold):\n    scores = predict_score(results, interesting_topic_ids)\n    return _predict(scores, threshold)\n\ndef predict_topic_specific(results, interesting_topic_ids, threshold_ts):\n    max_topic_id, max_topic_score = _parse_model_results(results)\n    scores = _predict_score(max_topic_id, max_topic_score, interesting_topic_ids)\n    return _predict_topic_specific(max_topic_id, scores, threshold_ts)\n\ndef report_curve(y_true, y_pred_score, r=0, p=0):\n    precision, recall, ts = precision_recall_curve(y_true, y_pred_score)\n    f1_score = 2*precision*recall/(precision+recall+1e-10)\n    plt.step(recall, precision, color='b', alpha=0.2, where='post')\n    plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n    plt.xlabel('Recall')\n    plt.ylabel('Precision')\n    plt.ylim([0.0, 1.05])\n    plt.xlim([0.0, 1.0])\n    plt.title('2-class Precision-Recall curve')\n    if r and p:\n        plt.plot(r, p, '*')\n    plt.show()\n\ndef choose_topic_threshold(results, y_true, interesting_topic_ids, pt=0.5, verbose=False):\n    precision_dict = {}\n    recall_dict = {}\n    threshold_dict = {}\n\n    for topic_id in interesting_topic_ids:\n        y_pred_score = predict_score(results, [topic_id])\n        if verbose:\n            print(\"Topic id: %d\" % (topic_id))\n            report_curve(y_true, y_pred_score)\n        precision, recall, thresholds = precision_recall_curve(y_true, y_pred_score)\n\n        # drop the padded endpoints so precision/recall align with thresholds\n        precision_dict[topic_id] = precision[1:-1]\n        recall_dict[topic_id] = recall[1:-1]\n        threshold_dict[topic_id] = thresholds[1:]\n\n    def get_threshold_ts(precision_dict, recall_dict, threshold_dict, t):\n        thresholds_ts = {}\n        for topic_id in precision_dict.keys():\n            if len(threshold_dict[topic_id]) == 0:\n                thresholds_ts[topic_id] = 1\n            else:\n                pre = precision_dict[topic_id]\n                r = recall_dict[topic_id]\n                ts = threshold_dict[topic_id]\n                p = pre[pre > t]\n                r = r[pre > t]\n                ts = ts[pre > t]\n                if len(p) == 0:\n                    thresholds_ts[topic_id] = 1\n                else:\n                    f1 = 2*p*r/(p+r)\n                    max_id = np.argmax(f1)\n                    thresholds_ts[topic_id] = ts[max_id]\n        return thresholds_ts\n\n    if type(pt) == 
list:\n        thresholds_ts = []\n        for t in pt:\n            thresholds_ts.append(get_threshold_ts(precision_dict, recall_dict, threshold_dict, t))\n        return thresholds_ts\n    else:\n        return get_threshold_ts(precision_dict, recall_dict, threshold_dict, pt)\n\n\ndef _evaluate_ts(results, y_true, interesting_topic_ids, threshold_ts_all):\n    p_ts, r_ts, f1_ts = [], [], []\n    for thresholds_ts in threshold_ts_all:\n        y_pred_ts_all = predict_topic_specific(results, interesting_topic_ids, thresholds_ts)\n        p, r, f, s = precision_recall_fscore_support(y_true, y_pred_ts_all)\n        p_ts.append(p[1])\n        r_ts.append(r[1])\n        f1_ts.append(2*p[1]*r[1]/(p[1]+r[1]))\n    return p_ts, r_ts, f1_ts\n\n\ndef auto_evaluate(p_model, test_samples, y_true, n_components, pt_list=[0.4, 0.45, 0.5, 0.55, 0.60], verbose=False):\n    # automatically evaluate the model on a test set\n    test_results = p_model.transform(test_samples)\n    ITid_all = range(n_components)\n    y_pred_score_all = predict_score(test_results, ITid_all)\n    threshold_ts_all = choose_topic_threshold(test_results, y_true, ITid_all, pt_list, verbose)\n    p_ts, r_ts, f1_ts = _evaluate_ts(test_results, y_true, ITid_all, threshold_ts_all)\n    if verbose:\n        report_curve(y_true, y_pred_score_all, r_ts, p_ts)\n    return p_ts, r_ts, f1_ts\n\n\ndef auto_dev_and_evaluate(p_model, X_dev, Y_dev, X_test, Y_test, n_components,\n                          pt_list=[0.4, 0.45, 0.5, 0.55, 0.60], verbose=False):\n    # choose thresholds_ts on the dev set\n    results_dev = p_model.transform(X_dev)\n    ITid_all = range(n_components)\n    Y_dev_pred_score_all = predict_score(results_dev, ITid_all)\n    threshold_ts_all = choose_topic_threshold(results_dev, Y_dev, ITid_all, pt_list, verbose)\n    # dev performance\n    p_ts_dev, r_ts_dev, f1_ts_dev = _evaluate_ts(results_dev, Y_dev, ITid_all, threshold_ts_all)\n\n    # test performance\n    results_test = p_model.transform(X_test)\n    p_ts_test, r_ts_test, f1_ts_test = _evaluate_ts(results_test, Y_test, ITid_all, threshold_ts_all)\n\n    return p_ts_dev, r_ts_dev, f1_ts_dev, p_ts_test, r_ts_test, f1_ts_test\n","sub_path":"relevant_analysis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"614251321","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os, json\n\ncourses_in = {}\ncourses_out = {}\nmajors_in = {}\nmajors_out = {}\n\nwith open('courses.json') as infile:\n\tcourses_in = json.load(infile)\n\nwith open('courses.txt', 'w') as outfile:\n\ti = 1\n\tfor c in courses_in:\n\t\toutfile.write(\"%d\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" %\n\t\t\t(\n\t\t\t\ti,\n\t\t\t\tc[\"title\"].encode('ascii', 'ignore'),\n\t\t\t\tc[\"cid\"].encode('ascii', 'ignore'),\n\t\t\t\tc[\"terms\"].encode('ascii', 'ignore'),\n\t\t\t\tc[\"instructors\"].encode('ascii', 'ignore'),\n\t\t\t\tc[\"credits\"],\n\t\t\t\tc[\"overview\"].encode('ascii', 'ignore')\n\t\t\t)\n\t\t)\n\t\ti += 1\n\n# with open('courses.json') as outfile:\n# \tcourses_in = json.load(outfile)\n\n# with open('majors.json') as outfile:\n# \tmajors_in = json.load(outfile)\n\n# for course in courses_in:\n# \tif course[\"subj\"] not in courses_out : courses_out[course[\"subj\"]] = []\n# \tcourses_out[course[\"subj\"]].append(course)\n\n# for major in majors_in:\n# \tif major[\"major\"] not in majors_out : majors_out[major[\"major\"]] = []\n# \tmajors_out[major[\"major\"]].append(major[\"course\"])\n\n# with open('courses_formated.json', 'w') as outfile:\n# \tjson.dump(courses_out, outfile)\n\n# with open('majors_formated.json', 'w') as outfile:\n# \tjson.dump(majors_out, 
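Stripped of the per-topic bookkeeping, choose_topic_threshold above amounts to sweeping sklearn's precision-recall curve and keeping the threshold with the best F1 among points whose precision clears a floor. A compact sketch of just that step; `pick_threshold` is an illustrative name, and the threshold alignment follows sklearn's documented convention that `precision[i]` corresponds to `thresholds[i]`:

```python
import numpy as np
from sklearn.metrics import precision_recall_curve

def pick_threshold(y_true, scores, pt=0.5):
    precision, recall, thresholds = precision_recall_curve(y_true, scores)
    # precision/recall carry one extra trailing entry (the (1, 0) endpoint);
    # drop it so both arrays line up with thresholds.
    p, r = precision[:-1], recall[:-1]
    keep = p > pt
    if not keep.any():
        return 1.0  # no operating point is precise enough; predict nothing
    f1 = 2 * p[keep] * r[keep] / (p[keep] + r[keep] + 1e-10)
    return thresholds[keep][np.argmax(f1)]

y = np.array([0, 0, 1, 1, 1])
s = np.array([0.1, 0.4, 0.35, 0.8, 0.9])
print(pick_threshold(y, s, pt=0.5))  # 0.35: precision 0.75, recall 1.0
```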
outfile)\n","sub_path":"getCourses/format_json.py","file_name":"format_json.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
+{"seq_id":"311022080","text":"from RentCar.Vehicle.LuxuryCar import LuxuryCar\nfrom RentCar.Vehicle.SmallCar import SmallCar\nfrom RentCar.Vehicle.Truck import Truck\nfrom RentCar.Vehicle.FullSizeCar import FullSizeCar\nfrom RentCar.Vehicle import Vehicle\n\nfrom importlib import reload\nreload(Vehicle)  # re-import the base module so edits are picked up during development\n\nclass VehicleFactory:\n\t__instance = None\n\n\t@staticmethod\n\tdef getVehicleFactory():\n\t\t# Return the shared instance, creating it on first use.\n\t\tif VehicleFactory.__instance is None:\n\t\t\tVehicleFactory()\n\t\treturn VehicleFactory.__instance\n\n\tdef __init__(self):\n\t\tif VehicleFactory.__instance:\n\t\t\traise Exception('\\n ERROR! VehicleFactory is a singleton class \\n')\n\t\telse:\n\t\t\tVehicleFactory.__instance = self\n\n\t@staticmethod\n\tdef makeVehicle(vehicleID, vehicleName, makeCompany, modelYear, currentMileage, location, timeLastServiced, registrationTag, vehicleCondition, vehicleType, seatCapacity):\n\t\t# Dispatch on vehicleType and build the matching subclass.\n\t\tvehicleObject = None\n\t\tif vehicleType == \"SMALLCAR\":\n\t\t\tvehicleObject = SmallCar(vehicleID, vehicleName, makeCompany, modelYear, currentMileage, location, timeLastServiced, registrationTag, vehicleCondition, seatCapacity)\n\t\telif vehicleType == \"FULLSIZECAR\":\n\t\t\tvehicleObject = FullSizeCar(vehicleID, vehicleName, makeCompany, modelYear, currentMileage, location, timeLastServiced, registrationTag, vehicleCondition, seatCapacity)\n\t\telif vehicleType == \"TRUCK\":\n\t\t\tvehicleObject = Truck(vehicleID, vehicleName, makeCompany, modelYear, currentMileage, location, timeLastServiced, registrationTag, vehicleCondition, seatCapacity)\n\t\telif vehicleType == \"LUXURYCAR\":\n\t\t\tvehicleObject = LuxuryCar(vehicleID, vehicleName, makeCompany, modelYear, currentMileage, location, timeLastServiced, registrationTag, vehicleCondition, seatCapacity)\n\t\treturn vehicleObject\n","sub_path":"RentCar/Vehicle/VehicleFactory.py","file_name":"VehicleFactory.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"}
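A closing aside on the VehicleFactory record: the if/elif dispatch can be replaced by a registry dict, which keeps the factory open to new vehicle types without editing the method. This is an alternative sketch, not the repository's API; the minimal `_Vehicle` classes and the simplified constructor are stand-ins for illustration only:

```python
class _Vehicle:
    # Stand-in base class; the real constructors take many more fields.
    def __init__(self, vehicle_id, name):
        self.vehicle_id, self.name = vehicle_id, name

class SmallCar(_Vehicle): pass
class FullSizeCar(_Vehicle): pass
class Truck(_Vehicle): pass
class LuxuryCar(_Vehicle): pass

# Type name -> class; adding a vehicle type is one new entry here.
_REGISTRY = {
    "SMALLCAR": SmallCar,
    "FULLSIZECAR": FullSizeCar,
    "TRUCK": Truck,
    "LUXURYCAR": LuxuryCar,
}

def make_vehicle(vehicle_type, *args, **kwargs):
    try:
        cls = _REGISTRY[vehicle_type]
    except KeyError:
        # Unlike the original, fail loudly instead of returning None.
        raise ValueError("unknown vehicle type: %r" % vehicle_type)
    return cls(*args, **kwargs)

car = make_vehicle("SMALLCAR", 1, "Mini")
print(type(car).__name__, car.name)  # SmallCar Mini
```

Raising on an unknown type is a deliberate change from the original's silent `return None`, which defers the failure to whoever uses the returned object.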