diff --git "a/4161.jsonl" "b/4161.jsonl" new file mode 100644--- /dev/null +++ "b/4161.jsonl" @@ -0,0 +1,710 @@ +{"seq_id":"253775753","text":"#!/usr/bin/python3\n\nimport requests\nimport os\nimport time\nimport datetime\nimport json\nimport threading\n\ng_defGetAPITuple = (\"getABNormalEventProperty\",\n \"getAccessPointPrty\",\n \"getAlarmProperty\",\n \"getAlarmPropertyEx\",\n \"getCloudBindInfo\",\n \"getCruiseLinePropertyEx\",\n \"getDDNS\",\n \"getDeviceInfo\",\n \"getDstProperty\",\n \"getDvrDDNS\",\n \"getEncodingProperty2\",\n \"getEncodingScope2\",\n \"getFillLightSettings\",\n \"getFishEyeProperty\",\n \"getIoProperty\",\n \"getLogFile\",\n \"getNetParam\",\n \"getNetPropertyEx\",\n \"getNvrRegularRecordProperty\",\n \"getRegularRecordProperty\",\n \"getOsdEx\",\n \"getP2P\",\n \"getP2PInfo\",\n \"getPPTPPrty\",\n \"getPTZInfo\",\n \"getPTZProperty\",\n \"getROIProperty\",\n \"getSecurity\",\n \"getSerialProperty\",\n \"getSmartBDPrty\",\n \"getSmartIDPrty\",\n \"getSmartMDPrty\",\n \"getSmartSCPrty\",\n \"getSmtp2\",\n \"getStorageInfo2\",\n \"getSuitableEncodingProperty\",\n \"getSystemRebootExProperty\",\n \"getTimeParam\",\n \"getTimeReboot\",\n \"getUserConfig\",\n \"getVideoCoverAlarmProperty\",\n \"getWifiList\",\n )\n\ng_defSetAPITuple = (\n \"setABNormalEventProperty\",\n \"setAccessPointPrty\",\n \"setAlarmProperty\",\n \"setAlarmPropertyEx\",\n \"setCruiseLinePropertyEx\",\n \"setDDNS\",\n \"setDstProperty\",\n \"setEncodingProperty2\",\n \"setFillLightSettings\",\n \"setFishEyeProperty\",\n \"setIoProperty\",\n \"setNetParam\",\n \"setNetPropertyEx\",\n \"setOsdEx\",\n \"setP2P\",\n \"setPPTPPrty\",\n \"setPTZInfo\",\n \"setPTZProperty\",\n \"setROIProperty\",\n \"setSecurity\",\n \"setSerialProperty\",\n \"setSmartBDPrty\",\n \"setSmartIDPrty\",\n \"setSmartMDPrty\",\n \"setSmartSCPrty\",\n \"setSmtp2\",\n \"setSystemRebootExProperty\",\n \"setTimeParam\",\n \"setVideoCoverAlarmProperty\",\n )\n\n\ndef runThreading(url, sessionID, id):\n print (\"开始线程:\", url, sessionID)\n while True:\n #print(\"id = \", id)\n testGetInfo(url, sessionID)\n\n time.sleep(0.1)\n print (\"退出线程:\")\n\n\ndef CGItest() :\n headers = {\"Content-Type\": \"application/json\"}\n dataDict = {\"api\": \"login\", \"data\": {}}\n dataJson = json.dumps(dataDict)\n response = requests.post(\"http://172.18.198.205:80/cgi-bin/test.cgi\", headers=headers, data=dataJson)\n if response.status_code != 200:\n print(\"response.status_code:\", response.status_code)\n return False\n print(response.text)\n\n\n\ndef login(url, user, pwd) :\n headers = {\"Content-Type\": \"application/json\"}\n dataDict = {\"api\": \"login\", \"data\": {\"username\": user, \"password\": pwd}}\n dataJson = json.dumps(dataDict)\n\n response = requests.post(url, headers=headers, data=dataJson)\n if response.status_code != 200:\n print(\"response.status_code:\", response.status_code)\n return False\n\n jsonDict = json.loads(response.text)\n if not isinstance(jsonDict, dict):\n print(\"jsonDict isn't dict: \", type(jsonDict))\n\n retCode = jsonDict[\"code\"]\n if retCode != 0 :\n print(\"login fail\")\n return False\n\n return response.cookies.get(\"sessionID\")\n#def login(user, pwd) end\n\n\ndef GetapiSwitch(api):\n switcher = {\n \"getABNormalEventProperty\": {\"abnormalType\": \"1\"},\n \"getLogFile\":{\"startTime\":\"1518048000\",\"endTime\":\"1518134399\",\"bufNum\":\"1000\"},\n \"getStorageInfo2\": {\"storageType\": \"1\"}\n }\n return switcher.get(api, {})\n\n\ndef testGetInfo(url, sessionID):\n global g_defGetAPITuple\n 
apiTuple = g_defGetAPITuple\n    #apiTuple = (\"getAlarmPropertyEx\",)\n\n    headers = {\"Cookie\": \"sessionID=\" + sessionID}\n\n    for api in apiTuple:\n        dataDict = {\"api\": api, \"data\": GetapiSwitch(api)}\n        dataJson = json.dumps(dataDict)\n\n        beginTime = time.time()\n        response = requests.post(url, headers=headers, data=dataJson)\n        if response.status_code != 200:\n            print(\"response.status_code:\", response.status_code)\n            continue\n        responseTime = time.time() - beginTime\n        #print(dataJson)\n        print(api + \": response time is \" + \"{0}ms\".format(int(responseTime * 1000)))\n        print(\"len = \" + str(len(response.text)) + \"\\r\\n\")\n\n        # jsonDict = json.loads(response.text)\n        # if not isinstance(jsonDict, dict):\n        #     print(\"jsonDict isn't dict: \", type(jsonDict))\n        # print(jsonDict[\"code\"])\n        # print(jsonDict[\"data\"])\n#def testGetInfo(url, sessionID) end\n\ndef SetapiSwitch(api):\n    switcher = {\n        \"setABNormalEventProperty\"   :{},\n        \"setAccessPointPrty\"         :{},\n        \"setAlarmProperty\"           :{},\n        \"setAlarmPropertyEx\"         :{},\n        \"setCruiseLinePropertyEx\"    :{},\n        \"setDDNS\"                    :{},\n        \"setDstProperty\"             :{},\n        \"setEncodingProperty2\"       :{},\n        \"setFillLightSettings\"       :{},\n        \"setFishEyeProperty\"         :{},\n        \"setIoProperty\"              :{},\n        \"setNetParam\"                :{},\n        \"setNetPropertyEx\"           :{},\n        \"setOsdEx\"                   :{},\n        \"setP2P\"                     :{},\n        \"setPPTPPrty\"                :{},\n        \"setPTZInfo\"                 :{},\n        \"setPTZProperty\"             :{},\n        \"setROIProperty\"             :{},\n        \"setSecurity\"                :{},\n        \"setSerialProperty\"          :{},\n        \"setSmartBDPrty\"             :{},\n        \"setSmartIDPrty\"             :{},\n        \"setSmartMDPrty\"             :{},\n        \"setSmartSCPrty\"             :{},\n        \"setSmtp2\"                   :{},\n        \"setSystemRebootExProperty\"  :{},\n        \"setTimeParam\"               :{},\n        \"setVideoCoverAlarmProperty\" :{},\n    }\n    return switcher.get(api, {})\n\ndef testSetAPI(url, sessionID):\n    global g_defSetAPITuple\n\n    apiTuple = g_defSetAPITuple\n    headers = {\"Cookie\": \"sessionID=\" + sessionID}\n\n    for api in apiTuple:\n        dataDict = {\"api\": api, \"data\": SetapiSwitch(api)}\n        dataJson = json.dumps(dataDict)\n        response = requests.post(url, headers=headers, data=dataJson)\n        if response.status_code != 200:\n            print(\"response.status_code:\", response.status_code)\n            continue\n        print(dataJson)\n        print(api + \":\")\n        print(response.text + \"\\r\\n\")\n#def testSetAPI(url, sessionID) end\n\n\ndef ConcurrentTestIPCNewOCX():\n    # Create the new threads\n    thread1 = threading.Thread(target = runThreading, args=(url, sessionID, 1))\n    thread1.start()\n    thread2 = threading.Thread(target = runThreading, args=(url, sessionID, 2))\n    thread2.start()\n    thread3 = threading.Thread(target = runThreading, args=(url, sessionID, 3))\n    thread3.start()\n    thread4 = threading.Thread(target = runThreading, args=(url, sessionID, 4))\n    thread4.start()\n    thread5 = threading.Thread(target = runThreading, args=(url, sessionID, 5))\n    thread5.start()\n\n    thread6 = threading.Thread(target = runThreading, args=(url, sessionID, 6))\n    thread6.start()\n    thread7 = threading.Thread(target = runThreading, args=(url, sessionID, 7))\n    thread7.start()\n    thread8 = threading.Thread(target = runThreading, args=(url, sessionID, 8))\n    thread8.start()\n    thread9 = threading.Thread(target = runThreading, args=(url, sessionID, 9))\n    thread9.start()\n    thread10 = threading.Thread(target = runThreading, args=(url, sessionID, 10))\n    thread10.start()\n\n    time.sleep(100000)\n#def testIPCNewOCX() end\n\n\n\nwhile True:\n    try:\n        url = \"http://172.18.198.212:80/api.html\"\n        sessionID = login(url, \"admin\", \"admin\")\n        if False == sessionID:\n            print(\"login fail\")\n            exit(-1)\n\n\n\n        #ConcurrentTestIPCNewOCX()\n        
testGetInfo(url, sessionID)\n        exit(0)\n    except ValueError as err: # 'as err' can be omitted; this branch runs when the exception is a ValueError\n        print(\"Oops! That was no valid number:\", err)\n    else: # runs when the try block finishes without raising an exception\n        print(\"oh yes \")\n\n    time.sleep(1)\n","sub_path":"pySrc/testOCXPOST.py","file_name":"testOCXPOST.py","file_ext":"py","file_size_in_byte":8582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"30929533","text":"#!/usr/bin/python3\n#NOTE: The initial version does NOT support creating its own virtual outputs ; an existing physical or virtual output must be hijacked.\nimport os\nfrom subprocess import check_output\n\ndef aggregateOutputDevices()->list:\n    xOutput = check_output('xrandr')\n    xOutput = xOutput.decode('utf-8') #Get rid of byte-encoding, ew.\n    xList = xOutput.split('\\n')\n\n    viableOutputHostNames = [\"DP\",\"VGA\",\"DisplayPort\",\"HDMI\",\"VIRTUAL\",\"DVI\"] #Add port types here if your system supports other display technology, such as S-Video or Apple Display Connector.\n    \n    portLines = []\n    for line in xList:\n        for portType in viableOutputHostNames:\n            if portType in line:\n                portLines.append(line)\n    \n    cPorts = []\n    dPorts = [] #Create empty lists for connected and disconnected ports.\n\n    #Sort the ports into connected and disconnected.\n    for line in portLines:\n        if \"disconnected\" in line:\n            dPorts.append(line)\n        elif \"connected\" in line:\n            cPorts.append(line)\n    \n    for x in range(len(cPorts)):\n        cPorts[x]=cPorts[x].split(\" \")[0]\n    \n    for x in range(len(dPorts)):\n        dPorts[x]=dPorts[x].split(\" \")[0]\n\n    return [cPorts,dPorts]\n\ndef realX(): #Dead simple python wrapper to call xrandr to reorient itself with only real displays.\n    os.system('xrandr --auto')\n\ndef addmodeX(display:str='VIRTUAL1',width='768',height='768',rRate='60'):\n    os.system('xrandr --addmode '+display+' '+width+'x'+height+'_'+rRate)\n\ndef createVMM(width:str='768',height:str='768',rRate:str='60'): #Create a virtual monitor mode, and set the properties of the virtual display.\n\n    #modeInfo = check_output(['gtf', '600', '975' ,'60']) #Sanity test run.\n\n    modeInfo = check_output(['gtf',width,height,rRate]) #A bit of junk gets added. :(\n    findModeline = modeInfo.find(b'Modeline') #Find Modeline subsection. 
The b before Modeline translates the string into a format that the find method can understand, a side-effect of Python 3.5.\n\n #print(\"Found beginning of Modeline at: \",findModeline) Debug.\n #print(modeInfo[findModeline+9:]) Debug.\n #print(modeInfo[findModeline+9:].decode('utf-8')) Debug.\n\n modeline = modeInfo[findModeline+9:].decode('utf-8') #Have to truncate modeInfo to get the string we want to pass to xrandr, along with re-formatting the byte-encoded string acquired from earlier.\n os.system(\"xrandr --newmode \"+modeline)\n\ndef outputVirtualDisplay(hostPort:str,width,height,rRate,position:str='--left-of',relativeMonitor:str='DisplayPort-1'):\n os.system('xrandr --output '+hostPort+' --mode '+width+'x'+height+'_'+rRate+' '+position+' '+relativeMonitor)\n\ndef pressMe(hostPort,width,height,rRate,position,relativeMonitor):\n createVMM(width,height,rRate)\n addmodeX(hostPort,width,height,rRate)\n outputVirtualDisplay(hostPort,width,height,rRate,position,relativeMonitor)\n \n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"74218145","text":"physicsScore = [15, 12, 8, 8, 7, 7, 7, 6, 5, 3]\nhistoryScores = [10, 25, 17, 11, 13, 17, 20, 13, 9, 15]\n\n\ndef pearsonr(x, y):\n # Assume len(x) == len(y)\n n = len(x)\n sum_x = float(sum(x))\n sum_y = float(sum(y))\n sum_x_sq = sum(map(lambda x: pow(x, 2), x))\n sum_y_sq = sum(map(lambda x: pow(x, 2), y))\n psum = sum(map(lambda x, y: x * y, x, y))\n num = psum - (sum_x * sum_y/n)\n den = pow((sum_x_sq - pow(sum_x, 2) / n) * (sum_y_sq - pow(sum_y, 2) / n), 0.5)\n if den == 0: return 0\n return num / den\n\n\nprint(\"%.3f\" % pearsonr(physicsScore, historyScores))\n","sub_path":"HackerRank/KPCorelationCoeff.py","file_name":"KPCorelationCoeff.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"305713499","text":"'''\nCreated on Jan 19, 2015\n\n@author: Trevor Boone\n'''\nimport os\n\n\n\nDEFAULT_SCALE = 1e-5\n\ndef parse(filename, scaler = DEFAULT_SCALE):\n '''\n returns a dictionary for each tool with points, and diameter\n points dict of x, y\n \n It is in the following format where T0 and T1 are\n tool name (javascript implementation at time of writing doesn't\n care about what the key values for the tools actually are)::\n {\"T0\" : {\"diameter\": 5.1, \"points\": [{'x': 0.0, 'y': 0.0}, {'x': 234.0, 'y': 15.0}]},\n {\"T1\" : ...\n }\n '''\n scale = lambda position: scaler * position\n \n f = open(filename, 'r')\n \n # parsing tools\n tools = {\"None\": {\"diameter\":1, \"holes\": []}}\n\n while(True):\n line = f.readline()\n if line[1] == \"T\":\n tool = line[1:line.find(\" \")]\n diameter_start = line.find(\"=\") + 2\n diameter_end = line.find(\" \", diameter_start)\n diameter = float(line[diameter_start: diameter_end])\n tools[tool] = {\"diameter\": diameter, \"holes\": []}\n if line.find(\";\") == -1:\n break\n \n cur_tool = \"None\"\n \n line = f.readline().strip()\n x = 0\n y = 0\n \n # commands implemented (T#, R#[X# or Y#], X#Y#, [X# or Y#])\n while(line != \"\"):\n if line[0] == \"T\":\n cur_tool = line.strip()\n else:\n if line[0] == \"R\":\n xpos = line.find(\"X\")\n ypos = line.find(\"Y\")\n iter_end = xpos if xpos >= 0 else ypos\n iterations = int(line[1:iter_end])\n if xpos >= 0:\n num_end = ypos if ypos >=0 else 0\n x_incr = float(line[xpos+1:num_end]) if num_end > xpos else 
float(line[xpos+1:])\n                else:\n                    x_incr = 0\n                if ypos >= 0:\n                    y_incr = float(line[ypos+1:])\n                else:\n                    y_incr = 0\n                for i in range(iterations):\n                    x += x_incr\n                    y += y_incr\n                    tools[cur_tool]['holes'].append({\"x\": scale(x), \"y\": scale(y)})\n            else:\n                xpos = line.find(\"X\")\n                ypos = line.find(\"Y\")\n                #print(\" \".join((xpos.__str__(), ypos.__str__())))\n                if xpos >= 0:\n                    num_end = ypos if ypos >=0 else 0\n                    x = float(line[xpos+1:num_end]) if num_end > xpos else float(line[xpos+1:]) \n                if ypos >= 0:\n                    y = float(line[ypos+1:])\n                tools[cur_tool]['holes'].append({\"x\": scale(x), \"y\": scale(y)})\n        line = f.readline().strip()\n    f.close()\n    return tools\n    \n    \nclass FileHandler:\n    '''\n    handles the calls that affect the server filesystem\n    '''\n    def __init__(self, messager, name, directory):\n        '''\n        Constructs a FileHandler\n        Args:\n            messager (MessageBuffer): MessageBuffer that handles the passing of messages to the webserver\n            name (str): key in the message dictionary that this object will store its messages in\n            directory (str): All of the files read, created, or deleted by this object will be in this directory\n        '''\n        self.messager = messager\n        self.name = name\n        self.directory = directory\n        self.messager.notify(name, self.listFiles())\n    \n    def listFiles(self):\n        '''\n        lists files in the current directory\n        '''\n        return os.listdir(self.directory)\n    \n    def addFile(self, name, data):\n        '''\n        adds a file to the directory specified in the constructor\n        \n        Args:\n            name (str): name of the file to be created\n            data (str): contents of the file to be written\n        '''\n        path = os.path.join(self.directory, name)\n        self.raiseIfInvalid(path)\n        self.deleteFile(path)\n        f = open(path, 'w')\n        f.write(data)\n        f.close()\n        self.messager.notify(self.name, self.listFiles())\n    \n    def deleteFile(self, name):\n        '''\n        deletes a file from the current directory\n        Args:\n            name (str): name of the file to be deleted\n        '''\n        path = os.path.join(self.directory, name)\n        self.raiseIfInvalid(path)\n        if os.path.exists(path) and os.path.isfile(path):\n            os.remove(path)\n            self.files = os.listdir(self.directory)\n            self.messager.notify(self.name, self.listFiles())\n    \n    def getFile(self, name):\n        '''\n        returns a parsed version of a file in the current directory\n        Returns:\n            dict: see documentation for parse\n        '''\n        path = os.path.join(self.directory, name)\n        self.raiseIfInvalid(path)\n        return parse(path)\n\n    def raiseIfInvalid(self, path):\n        '''\n        raises a name error if the file path isn't in the current directory\n        '''\n        if os.path.dirname(path) != self.directory:\n            raise NameError(\"Invalid file name: file name has to resolve to same directory\")\n    \nif __name__ == \"__main__\":\n    points = parse(\"drill.drl\")\n    \n    print(points)\n","sub_path":"FileHandler.py","file_name":"FileHandler.py","file_ext":"py","file_size_in_byte":5181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"240468553","text":"'''\nHomework: based on the dictionary, output every possible way to segment a piece of text\n'''\n\n\n#Dictionary; the number after each word is its frequency. These entries are only examples -- you can add your own\nDict = {\"经常\":0.1,\n        \"经\":0.05,\n        \"有\":0.1,\n        \"常\":0.001,\n        \"有意见\":0.1,\n        \"歧\":0.001,\n        \"意见\":0.2,\n        \"分歧\":0.2,\n        \"见\":0.05,\n        \"意\":0.05,\n        \"见分歧\":0.05,\n        \"分\":0.1}\n\nsentence = \"经常有意见分歧\"\n\n\ndef cut_method(string, word_dict):\n    def sentences(cur):\n        result = []\n        if cur < len(string):\n            for next in range(cur + 1, len(string) + 1):\n                if (string[cur:next] in word_dict):\n                    result = result + [string[cur:next] + (tail and ',' + tail) for tail in\n                                       sentences(next)]\n        else:\n            return ['']\n        return result\n\n    words = 
[]\n    for line in sentences(0):\n        line = line.split(',')\n        words.append(line)\n\n    return words\n\nif __name__ == '__main__':\n    print(cut_method(sentence, Dict))\n\n\"\"\"\nExpected output\n[['经常', '有意见', '分歧'], \n ['经常', '有意见', '分', '歧'],\n ['经常', '有', '意见', '分歧'], \n ['经常', '有', '意见', '分', '歧'], \n ['经常', '有', '意', '见分歧'], \n ['经常', '有', '意', '见', '分歧'], \n ['经常', '有', '意', '见', '分', '歧'], \n ['经', '常', '有意见', '分歧'], \n ['经', '常', '有意见', '分', '歧'], \n ['经', '常', '有', '意见', '分歧'], \n ['经', '常', '有', '意见', '分', '歧'], \n ['经', '常', '有', '意', '见分歧'], \n ['经', '常', '有', '意', '见', '分歧'], \n ['经', '常', '有', '意', '见', '分', '歧']]\n\"\"\"\n\n","sub_path":"125-吕军-北京/week03/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397790765","text":"# -----------------------------------------------------\t#\n# SmartMirror v1.0 \t\t\t\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n# Nicolau Tahan e Arthur Demarchi - 29/03/17\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n# Powered by Dark Sky (https://darksky.net/poweredby/)\t#\n# -----------------------------------------------------\t#\n\nfrom Tkinter import *\nfrom PIL import Image, ImageTk\nimport time\nimport feedparser\nimport json\nimport requests\n\n# -------------------------------- Global Variables -------------------------------------------------- #\n\nclima_token = 'bb32dfc53f3c76134d49fef7e8c83801'\nclima_lang = 'en'\nclima_unit = 'si'\n\nfonte = 'CODE Light'\nsmall_text_size = 13\nmedium_text_size = 17\ntext_size = 20\nlarge_text_size = 35\nxlarge_text_size = 70\n\ntime_refresh_rate = 200 #ms\nrefresh_rate = 60000 #ms\n\nicon_dicionario = {\n    'clear-day': \"media/Sun.png\",  # clear sky day\n    'wind': \"media/Wind.png\",  #wind\n    'cloudy': \"media/Cloud.png\",  # cloudy day\n    'partly-cloudy-day': \"media/PartlySunny.png\",  # partly cloudy day\n    'rain': \"media/Rain.png\",  # rain day\n    'snow': \"media/Snow.png\",  # snow day\n    'snow-thin': \"media/Snow.png\",  # sleet day\n    'fog': \"media/Haze.png\",  # fog day\n    'clear-night': \"media/Moon.png\",  # clear sky night\n    'partly-cloudy-night': \"media/PartlyMoon.png\",  # scattered clouds night\n    'thunderstorm': \"media/Storm.png\",  # thunderstorm\n    'tornado': \"assests/Tornado.png\",  # tornado\n    'hail': \"assests/Hail.png\"  # hail\n}\n\n\n# --------------------------------- Classes ------------------------------------------------------------ #\n\nclass Relogio(Frame):\n\tdef __init__(self, root):\n\t\tFrame.__init__(self, root, bg='black')\n\n\t\t# Define the internal variables\n\t\tself.hora = ''\n\t\tself.dia_semana = ''\n\t\tself.data = ''\n\n\t\t# Define the Labels for the time, weekday and date\n\t\tself.dataLB = Label(self, text= self.data, font=(fonte, small_text_size), fg = 'white', bg = 'black')\n\t\tself.dataLB.pack(side = TOP, anchor = W)\n\n\t\tself.horaLB = Label(self, text= self.hora, font=(fonte, xlarge_text_size), fg = 'white', bg = 'black')\n\t\tself.horaLB.pack(side = TOP, anchor = W)\n\n\t\t# The Relogio class itself calls the Tick() method when it is initialized\n\t\tself.tick()\n\n\tdef tick(self):\n\n\t\t# Fetch the time, weekday and date information\n\t\tself.hora2 = time.strftime('%H:%M')\n\t\tself.dia_semana2 = time.strftime('%A')\n\t\tself.data2 = time.strftime('%A, %d %b')\n\n\t\t# Update the variables if anything has changed\n\t\tif self.hora != self.hora2:\n\t\t\tself.hora = self.hora2\n\t\tif self.data != self.data2:\n\t\t\tself.data = self.data2\n\n\t\t# Update 
the Labels with the config() method\n\t\tself.horaLB.config(text = self.hora)\n\t\tself.dataLB.config(text = self.data)\n\n\t\t# The method calls itself again after 200ms to check for changes\n\t\tself.horaLB.after(time_refresh_rate, self.tick)\n\nclass Clima(Frame):\n\tdef __init__(self, root):\n\t\tFrame.__init__(self, root, bg = 'black')\n\n\t\tself.temp = ''\n\t\tself.sumario = ''\n\t\tself.previsao = ''\n\t\tself.icon = ''\n\n\t\t# Same as in the other classes: create all the labels empty\n\t\tself.tempFR = Frame(self, bg = 'black')\n\t\tself.tempFR.pack(side = TOP, anchor = E)\n\n\t\tself.tempLB = Label(self.tempFR, text = self.temp, font = (fonte, xlarge_text_size), bg = 'black', fg = 'white')\n\t\tself.tempLB.pack(side = RIGHT, anchor = N)\n\n\t\tself.iconLB = Label(self.tempFR, bg = 'black', fg = 'white')\n\t\tself.iconLB.pack(side = LEFT, anchor = N)\n\n\t\tself.sumLB = Label(self, text = self.sumario, font = (fonte, text_size), bg = 'black', fg = 'white')\n\t\tself.sumLB.pack(side = TOP, anchor = E)\n\t\tself.prevLB = Label(self, text = self.previsao, font = (fonte, small_text_size), bg = 'black', fg = 'white')\n\t\tself.prevLB.pack(side = TOP, anchor = E)\n\n\t\tself.get_clima()\n\n\t# Method to look up the latitude and longitude\n\tdef get_lat_long(self):\n\t\tgeo_url = 'http://freegeoip.net/json/'\n\t\treq = requests.get(geo_url)\n\t\tself.geo_object = json.loads(req.text)\n\n\t\treturn self.geo_object['latitude'], self.geo_object['longitude']\n\n\tdef get_clima(self):\n\t\tlat, lon = self.get_lat_long()\n\t\tclima_url = 'https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s' % (clima_token, lat, lon, clima_lang, clima_unit)\n\n\t\t# Request for the weather object; the full structure is available @ https://darksky.net/dev/docs\n\t\tclima_req = requests.get(clima_url)\n\t\tself.clima_object = json.loads(clima_req.text)\n\n\t\tgrau = u'\\N{DEGREE SIGN}'\n\t\tself.temp2 = str(int(self.clima_object['currently']['temperature'])) + grau # Drop the decimal places\n\t\tself.sumario2 = self.clima_object['currently']['summary']\n\t\tself.previsao2 = self.clima_object['hourly']['summary']\n\n\t\t# Update the label values if anything has changed\n\t\tif self.temp != self.temp2:\n\t\t\tself.temp = self.temp2\n\t\t\tself.tempLB.config(text = self.temp)\n\t\tif self.sumario != self.sumario2:\n\t\t\tself.sumario = self.sumario2\n\t\t\tself.sumLB.config(text = self.sumario)\n\t\tif self.previsao != self.previsao2:\n\t\t\tself.previsao = self.previsao2\n\t\t\tself.prevLB.config(text = self.previsao)\n\n\t\tself.icon_id = self.clima_object['currently']['icon']\n\n\t\tif self.icon_id in icon_dicionario:\n\t\t\tself.icon2 = icon_dicionario[self.icon_id]\n\t\t\timage = Image.open(self.icon2)\n\t\t\timage = image.resize((80, 80), Image.ANTIALIAS)\n\t\t\timage = image.convert('RGB')\n\t\t\tphoto = ImageTk.PhotoImage(image)\n\t\t\n\t\t\tself.iconLB.config(image=photo)\n\t\t\tself.iconLB.image = photo\n\n\t\t# Call the function again after a set interval\n\t\t# NOTE: 1000 calls free of charge; beyond that you have to pay\n\t\tself.tempLB.after(refresh_rate, self.get_clima)\n\n\n# The Mail and News classes were merged into a single Feed_Reader class\nclass Feed_Reader(Frame):\n\tdef __init__(self, root, title, url):\n\t\tFrame.__init__(self, root, bg = 'black')\n\t\tself.url = url\n\n\t\ttitleLB = Label(self, \n\t\t\t\t\t\ttext = title, \n\t\t\t\t\t\tfont = (fonte, large_text_size), \n\t\t\t\t\t\tbg = 'black', \n\t\t\t\t\t\tfg = 'white'\n\t\t\t\t\t\t)\n\t\ttitleLB.pack(side = TOP, anchor = W, fill = 
BOTH)\n\n\t\tself.entrieFR = Frame(self, bg = 'black')\n\t\tself.entrieFR.pack(side = TOP, anchor = W)\n\n\t\tself.get_entries(self.url)\n\n\tdef get_entries(self, url):\n\t\tself.feed = feedparser.parse(url)\n\n\t\tfor children in self.entrieFR.winfo_children():\n\t\t\tchildren.destroy()\n\n\t\tfor entrie in self.feed.entries[0:3]:\n\t\t\tself.post = Titulo(self.entrieFR, entrie['title'])\n\n\t\tself.entrieFR.after(refresh_rate, lambda: self.get_entries(self.url))\n\n\nclass Titulo(Frame):\n\tdef __init__(self, root, title):\n\t\tFrame.__init__(self, bg = 'black')\n\n\t\tself.title = title.rsplit('-', 1) # Strips the source from the headline (who needs a source in the 21st century)\n\t\tself.titleLB = Label(root, text = title, font = (fonte, medium_text_size), bg = 'black', fg = 'white')\n\t\tself.titleLB.pack(side = TOP, anchor = W)\n\nclass Screen_Builder():\n\tdef __init__(self):\n\t\tself.root = Tk()\t# Main window\n\t\tself.root.attributes(\"-fullscreen\", True)\n\n\t\tself.screen_full = True\n\t\tself.root.bind('<Escape>', self.toggle_fullscreen)\n\n\t\t# Feed variables\n\n\t\t#news_url = 'http://news.google.com/news?ned=pt-BR_br&output=rss'\n\t\tself.news_url = 'http://feeds.reuters.com/reuters/topNews'\n\n\t\t#mail_usr = 'nicolau.test'\n\t\t#mail_psw = 'anavitoria'\n\t\t#mail_url = 'https://%s:%s@mail.google.com/mail/feed/atom' % (mail_usr, mail_psw)\n\n\t\t# Frame placement\n\n\t\tself.topFrame = Frame(self.root, bg = 'black')\n\t\tself.topFrame.pack(side = TOP, anchor = W, fill = BOTH, expand = YES)\n\n\t\tself.top_leftFrame = Frame(self.topFrame, bg = 'black')\n\t\tself.top_leftFrame.pack(side = LEFT, anchor = N)\n\n\t\t#sub_top_leftFrame = Frame(topFrame, bg = 'black')\n\t\t#sub_top_leftFrame.pack(side = LEFT, anchor = N)\n\n\t\tself.top_rightFrame = Frame(self.topFrame, bg = 'black')\n\t\tself.top_rightFrame.pack(side = RIGHT, anchor = N)\n\n\t\tself.bottomFrame = Frame(self.root, bg = 'black')\n\t\tself.bottomFrame.pack(side = BOTTOM, anchor = W, fill = BOTH, expand = YES)\n\n\t\t# Class declarations\n\t\trelogio = Relogio(self.top_leftFrame)\n\t\trelogio.pack(side = TOP, anchor = W, padx = 60, pady = 40)\n\n\t\t#mail = Feed_Reader(sub_top_leftFrame, title = \"Mail\", url = mail_url)\n\t\t#mail.pack(side = TOP, anchor = W)\n\n\t\tclima = Clima(self.top_rightFrame)\n\t\tclima.pack(side = TOP, anchor = E, pady = 40, padx = 20)\n\n\t\tnews = Feed_Reader(self.bottomFrame, title = \"News\", url = self.news_url)\n\t\tnews.pack(side = BOTTOM, anchor = W, fill = BOTH)\n\n\tdef toggle_fullscreen(self, event = None):\n\t\tself.screen_full = not self.screen_full\n\t\tself.root.attributes(\"-fullscreen\", self.screen_full)\n\ntk = Screen_Builder()\ntk.root.mainloop()\n","sub_path":"SmartMirrorV2/SM_v2.py","file_name":"SM_v2.py","file_ext":"py","file_size_in_byte":8337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496469738","text":"import os\nimport sys\nimport shutil\nimport argparse\nimport subprocess as sp\n\ninput_root_dir = \"probmods_webppl_to_psi\"\noutput_root_dir = \"benchmarks\"\nblack_list = [\"eps\", \"exp\", \"math\"]\n# , \"popl_examples\"]\nfile_black_list = []\n #[\"clinicalTrial.psi\",\"sum.psi\",\"max.psi\",\\\n# \"gaussian_infer.psi\",\"true_obs_error.psi\",\\\n# \"medical_diagnosis.psi\",\"tug_of_war.psi\",\\\n# \"explain_away.psi\",\"polyas_urn.psi\",\\\n# \"temperature_high_low.psi\",\"temperature_high_low.psi\",\\\n# \"communication_between_teacher_listener.psi\",\\\n# \"goal_inference.psi\",\"preferences.psi\",\\\n# 
\"scalar_implicature.psi\",\"estimating_causal_power.psi\",\\\n# \"learning_a_continuous_parameter.psi\",\\\n# \"learning_about_coins.psi\",\\\n# \"fair_or_unfair_coin.psi\",\\\n# \"the_rectangle_game_with_weak_sampling.psi\",\\\n# \"sampling_from_a_discrete_distribution.psi\",\\\n# \"the_dirichlet_processes.psi\",\\\n# \"bda_of_tug_of_war_model.psi\",\\\n# \"peoples_models_of_coins_expectation.psi\",\\\n# \"peoples_models_of_coins.psi\",\\\n# \"posterior_prediction.psi\",\\\n# \"single_regression.psi\",\\\n# \"true_obs.psi\",\\\n# \"unknown_numbers_of_categories.psi\",\\\n# \"savage_dickey_method.psi\"]\ncwd_dir = os.getcwd()\n\n\ndef make_dirs(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\ndef run_psense(input_file, output_file, flag_optim):\n cmd = [\"python3\",\"psense.py\", \"-tp\", \"600\", \"-f\", input_file,\"-log\",\"-plain\",flag_optim,\">\", output_file]\n cmd = \" \".join(cmd)\n print(\"Running\", \"psense\", \"-tp\", \"600\", \"-f\", input_file, \"-log\",\"-plain\",flag_optim,\">\", output_file)\n sp.run(cmd, shell=True)\n\ndef exit_message(message):\n print(message)\n exit(0)\n\ndef tool_installed(cmd):\n return shutil.which(cmd) is not None\n\ndef check_cmd():\n if not tool_installed(\"psi\"):\n exit_message(\"Please include \\\"psi\\\" into your PATH.\")\n if not tool_installed(\"wolfram\") and not tool_installed(\"wolframscript\"):\n exit_message(\"Please include \\\"wolfram\\\" or \\\"wolframscript\\\" into your PATH.\")\n\ndef main():\n if len(sys.argv) != 3 and len(sys.argv) != 4 :\n exit_message(\"Usage: python3 benchmark.py input_directory output_directory [-s]\\n\\\n optional argument: -s Run PSense with '-s'\")\n if (sys.version_info < (3, 5)): \n exit_message(\"Python vesrion should be 3.5 or greater.\")\n check_cmd()\n input_root_dir = sys.argv[1]\n output_root_dir = sys.argv[2]\n if len(sys.argv) >= 4 and sys.argv[3] == \"-s\":\n flag_optim = \"-s\"\n else:\n flag_optim = \"\"\n if not os.path.isdir(input_root_dir):\n print(\"The input directory \\\"\" + input_root_dir + \"\\\" is not existed.\")\n for next_dir, _, file_list in os.walk(input_root_dir):\n if any([key in next_dir for key in black_list]):\n continue\n print(\"\\nDIR:\", next_dir)\n for file_name in file_list:\n if not file_name.endswith(\".psi\") or (file_name in file_black_list):\n continue\n input_file = os.path.join(next_dir, file_name)\n output_dir = os.path.join(output_root_dir, next_dir[len(input_root_dir):].lstrip(\"/\"))\n make_dirs(output_dir)\n output_file_name = file_name.replace(\".psi\", \".txt\")\n output_file = os.path.join(output_dir, output_file_name)\n run_psense(input_file, output_file, flag_optim)\n sp.run(\"cat \" + output_file,shell=True)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"293970414","text":"'''\nMIT License\n\nCopyright (c) 2019 DEIS Project\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in 
all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\n#!/usr/bin/env python\nimport os\nimport subprocess\n\nimport rospy\n\n\ndef start_consert_evaluator():\n rospy.init_node('consert_evaluation_wrapper', anonymous=True)\n\n # get relevant ros parameters that shall be defined in the launch file that starts this node.\n model_storage_directory = rospy.get_param(\"model_storage_directory\", \"\")\n epsilon_script_directory = rospy.get_param(\"~epsilon_script_directory\")\n meta_model_file_path = rospy.get_param(\"~metamodel_file\")\n conserts_evaluator_path = rospy.get_param(\"~conserts_evaluator_path\")\n thrift_server_port = str(rospy.get_param(\"~thrift_server_port\"))\n\n print(\"server port = \" + thrift_server_port)\n\n print('Start ConSerts evaluation component.')\n # start ConSert evaluation component in an additional subprocess\n subprocess.call(['java',\n '-jar', conserts_evaluator_path,\n '-server-port', thrift_server_port,\n '-modelStorageDirectory', model_storage_directory,\n '-epsilonScriptDirectory', epsilon_script_directory,\n '-metaModelFile', meta_model_file_path])\n rospy.spin()\n\n\nif __name__ == '__main__':\n start_consert_evaluator()\n","sub_path":"runtime_safety_assurance_ros_packages/consert_evaluation/scripts/consert_evaluation_wrapper.py","file_name":"consert_evaluation_wrapper.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"604145956","text":"from bs4 import BeautifulSoup\nimport requests\n\nLEAGUE_URL = 'http://teamtrivia.com/lg_scores.aspx'\nATLANTA_LEAGUE_NUM = 3\nTEAM_DETAILS_URL = 'http://www.teamtrivia.com/lg_teamdet.aspx'\n\n\nclass TeamNotFoundException(BaseException):\n pass\n\n\ndef get_locations(league_id):\n \"\"\"\n Returns a hash map of id -> name for each of the locations found in the\n Location select box on a league's homepage.\n \"\"\"\n response = requests.get(LEAGUE_URL, params={'lg': league_id})\n league_soup = BeautifulSoup(response.content)\n select = league_soup.find(id='cmbLocFilt')\n locations = {o['value']:o.text for o in select.find_all('option')\n if o.text not in ('','--Show All--')\n }\n return locations\n\ndef get_teams_for_location(league_id, location_id):\n \"\"\"\n Retrieve a hash map of id -> name for teams based at a given\n location.\n \"\"\"\n session = requests.Session()\n first_get = session.get(LEAGUE_URL, params={'lg': league_id})\n get_soup = BeautifulSoup(first_get.content)\n viewstate = get_soup.find(attrs={'name':'__VIEWSTATE'})['value']\n location_resp = session.post(LEAGUE_URL, params={'lg': league_id},\n data={'__VIEWSTATE': viewstate,\n 'cmbLocFilt': location_id,\n 'cmdFilterbyLoc': 'Go',\n 'ea': 'your email address',\n 'm': 1011241472135,\n 'p': 'oi'})\n location_soup = BeautifulSoup(location_resp.content)\n teams = {}\n score_table = location_soup.find(id=\"dgScores\")\n for tr in score_table.find_all('tr')[1:]:\n tds = tr.find_all('td')\n team_id = tds[1].text\n team_name = tds[2].text\n teams[team_id] = team_name\n\n return teams\n\ndef 
get_team(league_id, team_id):\n \"\"\"\n Retrieve a dictionary representation of a team's attributes\n \"\"\"\n response = requests.get(TEAM_DETAILS_URL, params={'lg': league_id,\n 'tm': team_id})\n soup = BeautifulSoup(response.content)\n\n if soup.find(id='lblNam').text == '':\n raise TeamNotFoundException('Team with id {} does not exist'.format(team_id))\n\n team_name = soup.find(id='lblNam').text\n team_captain = soup.find(id='lblCaptain').text\n team_base = soup.find(id='lblHome_Loc').text\n team_rank = soup.find(id='lblRank').text\n team_top20_scores = soup.find(id='lblScore').text\n\n team_scores = []\n score_table = soup.find(id='dgScores')\n for tr in score_table.find_all('tr')[1:]:\n tds = tr.find_all('td')\n score = tds[0].text\n date = tds[1].text\n location = tds[2].text\n team_scores.append({'score': score, 'date': date, 'location': location})\n\n team_members = []\n members_table = soup.find(id='dgMems')\n for td in members_table.find_all('td')[1:]:\n team_members.append(td.text)\n\n team_dict = {}\n team_dict['id'] = team_id\n team_dict['name'] = team_name\n team_dict['captain'] = team_captain\n team_dict['home_location'] = team_base\n team_dict['rank'] = team_rank\n team_dict['total_score'] = team_top20_scores\n team_dict['scores'] = team_scores\n team_dict['members'] = team_members\n\n return team_dict","sub_path":"scraper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"384710046","text":"# author:joketeng time:2018/11/25\r\nnum1 = input()\r\nnum2 = input()\r\nnum3 = input()\r\nnum4 = input()\r\ni = 0\r\nres = []\r\nwhile i < len(num1) and i < len(num2):\r\n if num1[i] == num2[i] and (num1[i] >= 'A' and num1[i] <= 'G'):\r\n res.append(num1[i])\r\n break\r\n i += 1\r\ni += 1\r\nwhile i < len(num1) and i < len(num2):\r\n if num1[i] == num2[i] and ((num1[i] >= 'A' and num1[i] <= 'N') or num1[i].isdigit()):\r\n res.append(num1[i])\r\n break\r\n i += 1\r\nj = 0\r\nwhile j < len(num3) and j < len(num4):\r\n if num3[j] == num4[j] and num3[j].isalpha():\r\n res.append(j)\r\n break\r\n j += 1\r\nm = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']\r\nif res[1].isdigit():\r\n x = ord(res[1]) - ord('0')\r\nelse:\r\n x = ord(res[1]) - ord('A') + 10\r\nss = '%s %02d:%02d' % (m[ord(res[0]) - ord('A')], x, res[2])\r\nprint(ss)\r\n\r\n","sub_path":"pat-simple/1014.py","file_name":"1014.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"531823702","text":"from multiprocessing import Process, Queue\nfrom pint import UnitRegistry, set_application_registry\nfrom pint.testsuite import QuantityTestCase\n\nureg = UnitRegistry()\nset_application_registry(ureg)\nQ = ureg.Quantity\n\n\ndef f(to_child, to_parent):\n one, two = to_child.get()\n to_parent.put(one / two)\n\n\nclass TestIssues(QuantityTestCase):\n\n def test_multiprocessing(self):\n to_child = Queue()\n to_parent = Queue()\n p = Process(target=f, args=(to_child, to_parent))\n p.start()\n t1 = Q(50, 'ms')\n t2 = Q(50, ' ns')\n to_child.put((t1, t2))\n result = to_parent.get()\n self.assertEqual(result, t1 / t2)\n self.assertEqual(int(round(float(result))), 1000000)\n p.join()","sub_path":"pint/testsuite/test_multiprocessing.py","file_name":"test_multiprocessing.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"185497633","text":"# Copyright (c) 2015 Xilinx Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\nimport sys\nimport datetime\nimport threading\n\nclass LoggerSeverity:\n\tInfo= 0\n\tError = 1\n\tWarning = 2\n\tNote = 3\n\tDebug = 4\n\tUnknown = 5\n\nclass LoggerLevel:\n\tAll = -1\n\tHeavyDebug = 100000\n\tDebug = 10000\n\tFullVerbose = 1000\n\tVerbose = 100\n\tNormal = 10\n\nclass Logger:\n\tdefaultlogger = None\n\tdef __init__(self, verbosity = LoggerLevel.Normal):\n\t\tself.level = verbosity\n\n\tdef log(self, message, level = LoggerLevel.Normal, severity = LoggerSeverity.Info):\n\t\tpass\n\n\tdef verbose(self, message):\n\t\tself.log(message, LoggerLevel.Verbose, LoggerSeverity.Info)\n\n\tdef debug(self, message):\n\t\tself.log(message, LoggerLevel.Debug, LoggerSeverity.Debug)\n\n\tdef fdebug(self, message):\n\t\tself.log(message, LoggerLevel.HeavyDebug, LoggerSeverity.Debug)\n\n\tdef error(self, message):\n\t\tself.log(message, LoggerLevel.Normal, LoggerSeverity.Error)\n\n\tdef warning(self, message):\n\t\tself.log(message, LoggerLevel.Normal, LoggerSeverity.Warning)\n\n\tdef note(self, message):\n\t\tself.log(message, LoggerLevel.Verbose, LoggerSeverity.Note)\n\nclass SimpleLogger(Logger):\n\tdefaultlogger = None\n\tstdoutputlock = threading.RLock()\n\n\tdef __init__(self, verbosity = LoggerLevel.Normal):\n\t\tLogger.__init__(self, verbosity)\n\t\tself.enableprefix = True\n\t\tself.timestamp = False\n\n\tdef log(self, message, level = LoggerLevel.Normal, severity = LoggerSeverity.Info):\n\t\tfrom hopper.utils.console.ConsoleColor import ConsoleColor, colorString\n\t\twith SimpleLogger.stdoutputlock:\n\t\t\tsplitup = str(message).splitlines()\n\t\t\tfor i in splitup:\n\t\t\t\tif level <= self.level or self.level == LoggerLevel.All:\n\t\t\t\t\tprefix = \"\"\n\t\t\t\t\tif self.enableprefix:\n\t\t\t\t\t\tif severity == LoggerSeverity.Info:\n\t\t\t\t\t\t\tprefix = colorString(\"INFO: \", ConsoleColor.Blue)\n\t\t\t\t\t\telif severity == LoggerSeverity.Error:\n\t\t\t\t\t\t\tprefix = colorString(\"ERROR: \", ConsoleColor.Red)\n\t\t\t\t\t\telif severity == LoggerSeverity.Warning:\n\t\t\t\t\t\t\tprefix = colorString(\"WARN: \", ConsoleColor.Yellow)\n\t\t\t\t\t\telif severity == LoggerSeverity.Note:\n\t\t\t\t\t\t\tprefix = colorString(\"NOTE: \", ConsoleColor.White)\n\t\t\t\t\t\telif severity == LoggerSeverity.Debug:\n\t\t\t\t\t\t\tprefix = colorString(\"DEBUG: \", ConsoleColor.Cyan)\n\t\t\t\t\tif self.timestamp:\n\t\t\t\t\t\tprefix = \"(%s) \" % 
datetime.datetime.now().strftime(\"%X\") + prefix\n\t\t\t\t\tprint(\"%s%s\" % (prefix, i))\n\t\t\t\t\tsys.stdout.flush()\n\nclass FileLogger(Logger):\n\tdef __init__(self, filepath, verbosity = LoggerLevel.Normal):\n\t\tLogger.__init__(self, verbosity)\n\t\tself.enableprefix = False\n\t\tself.timestamp = False\n\t\tself.filestream = open(filepath, \"w\")\n\t\tself.writelock = threading.RLock()\n\n\tdef close(self):\n\t\tself.filestream.close()\n\n\tdef log(self, message, level = LoggerLevel.Normal, severity = LoggerSeverity.Info):\n\t\twith self.writelock:\n\t\t\tsplitup = str(message).splitlines()\n\t\t\tfor i in splitup:\n\t\t\t\tif level <= self.level or self.level == LoggerLevel.All:\n\t\t\t\t\tprefix = \"\"\n\t\t\t\t\tif self.enableprefix:\n\t\t\t\t\t\tif severity == LoggerSeverity.Info:\n\t\t\t\t\t\t\tprefix = \"INFO: \"\n\t\t\t\t\t\telif severity == LoggerSeverity.Error:\n\t\t\t\t\t\t\tprefix = \"ERROR: \"\n\t\t\t\t\t\telif severity == LoggerSeverity.Warning:\n\t\t\t\t\t\t\tprefix = \"WARN: \"\n\t\t\t\t\t\telif severity == LoggerSeverity.Note:\n\t\t\t\t\t\t\tprefix = \"NOTE: \"\n\t\t\t\t\t\telif severity == LoggerSeverity.Debug:\n\t\t\t\t\t\t\tprefix = \"DEBUG: \"\n\t\t\t\t\tif self.timestamp:\n\t\t\t\t\t\tprefix = \"(%s) \" % datetime.datetime.now().strftime(\"%X\") + prefix\n\t\t\t\t\tself.filestream.write(\"%s%s\\n\" % (prefix, i))\n\t\t\t\t\tself.filestream.flush()\n\nclass PrefixPassLogger(Logger):\n\tdef __init__(self, logger, prefix = None):\n\t\tLogger.__init__(self)\n\t\tself.logger = logger\n\t\tself.prefix = prefix\n\n\tdef log(self, message, level = LoggerLevel.Normal, severity = LoggerSeverity.Info):\n\t\tif self.logger:\n\t\t\tsplitup = str(message).splitlines()\n\t\t\tfor i in splitup:\n\t\t\t\tif self.prefix:\n\t\t\t\t\tself.logger.log(\"%s: %s\" % (self.prefix, i), level, severity)\n\t\t\t\telse:\n\t\t\t\t\tself.logger.log(i, level, severity)\n\nclass MassLogger(Logger):\n\tdef __init__(self):\n\t\tLogger.__init__(self)\n\t\tself.loggers = []\n\t\tself.loggerslock = threading.RLock()\n\n\tdef addLogger(self, logger, key = None):\n\t\twith self.loggerslock:\n\t\t\tself.loggers.append((logger, key))\n\n\tdef getLogger(self, key):\n\t\twith self.loggerslock:\n\t\t\tfor i in self.loggers:\n\t\t\t\tif i[1] == key:\n\t\t\t\t\treturn i[0]\n\t\treturn None\n\n\tdef log(self, message, level = LoggerLevel.Normal, severity = LoggerSeverity.Info):\n\t\twith self.loggerslock:\n\t\t\tif len(self.loggers) != 0:\n\t\t\t\tsplitup = str(message).splitlines()\n\t\t\t\tfor i in splitup:\n\t\t\t\t\tfor l in self.loggers:\n\t\t\t\t\t\tl[0].log(i, level, severity)\n\n","sub_path":"hopper/utils/logger/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"312075596","text":"#!/usr/bin/python3\n#\n# Logan Thomas\n# Cloud Computing Lab\n# Assignment 3\n#\n\nimport sys\nimport socket\nimport select\n\n\ndef chat_client():\n\tif len(sys.argv) < 3:\n\t\tprint('Usage : python3 chatClient.py hostname port')\n\t\tsys.exit()\n\t\n\thost = sys.argv[1]\n\tport = int(sys.argv[2])\n\t\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.settimeout(2)\n\t\n\t# connect to remote host\n\ttry:\n\t\ts.connect((host, port))\n\texcept:\n\t\tprint('Unable to connect')\n\t\tprint('Usage : python3 chatClient.py hostname port')\n\t\tsys.exit()\n\t\n\twhile 1:\n\t\tsocket_list = [sys.stdin, s]\n\t\t\n\t\t# Get the list sockets which are readable\n\t\tready_to_read, ready_to_write, 
in_error = select.select(socket_list, [], [])\n\t\t\n\t\tfor sock in ready_to_read:\n\t\t\tif sock == s:\n\t\t\t\t# incoming message from remote server, s\n\t\t\t\tdata = sock.recv(4096).decode('utf-8')\n\t\t\t\tif not data:\n\t\t\t\t\tprint('\\nDisconnected from chat server')\n\t\t\t\t\tsys.exit()\n\t\t\t\telse:\n\t\t\t\t\t# print data\n\t\t\t\t\tsys.stdout.write(data)\n\t\t\t\t\tsys.stdout.flush()\n\t\t\telse:\n\t\t\t\t# user entered a message\n\t\t\t\tmsg = sys.stdin.readline()\n\t\t\t\ts.send(str.encode(msg))\n\t\t\t\tsys.stdout.flush()\n\n\nif __name__ == \"__main__\":\n\tsys.exit(chat_client())\n","sub_path":"Assignment 3/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"480659341","text":"#! /usr/bin/python\n\nimport __init__\n\nfrom os.path import dirname, join, abspath\nimport newspaper\nfrom mq import redis_queue\nimport json\n\nclass Crawler(object):\n    \"\"\"Crawler class prepares a news source and writes it into the redis mq\"\"\"\n    def __init__(self):\n        self.queue = redis_queue.connect()\n\n    def crawl_all(self):\n        filepath = dirname(dirname(dirname(abspath(__file__))))+\"/data/sources.txt\"\n        with(open(filepath)) as infile:\n            for line in infile:\n                self.crawl(line.strip())\n\n    def crawl(self, news_source_url):\n        news_source = newspaper.build(news_source_url, dry=False)\n        jsonified_news_source = self.jsonify(news_source)\n        self.queue.publish('newssource-channel', jsonified_news_source)\n\n    def jsonify(self, news_source):\n        data = {}\n        data['articles'] = []\n        data['categories'] = []\n        for article in news_source.articles:\n            data['articles'].append(article.url)\n        for category in news_source.categories:\n            data['categories'].append(category.url) \n        data['newssource'] = news_source.url\n        return json.dumps(data)\n\nif __name__ == \"__main__\":\n    crawl = Crawler()\n    crawl.crawl_all()\n    print(\"Completed Crawl\")","sub_path":"app/crawler/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"554160636","text":"import subprocess\nimport logging\nimport datetime\n\nfrom taca.utils import statusdb\nfrom taca.utils.config import CONFIG\nfrom taca.utils.misc import send_mail\n\n\ndef get_nases_disk_space():\n    result = {}\n    config = CONFIG['server_status']\n    servers = config.get('servers', dict())\n\n    for server_url, path in servers.items():\n\n        # Get command\n        command = '{command} {path}'.format(command=config['command'], path=path)\n\n        # If localhost, don't connect to ssh\n        if server_url == 'localhost':\n            command = command.split()\n        else:\n            if 'promethion' in server_url:\n                user = 'prom'\n            else:\n                user = config['user']\n            # Connect via ssh to server and execute the command\n            command = ['ssh', '-t', '{}@{}'.format(user, server_url), command]\n\n        result[server_url] = _run_cmd(command)\n\n    # Storage systems are mounted locally, e.g. 
ngi-nas\n    for storage_system, path in config.get('storage_systems', {}).items():\n        # Get command\n        command = '{command} {path}'.format(command=config['command'], path=path)\n        result[storage_system] = _run_cmd(command.split())\n\n    return result\n\ndef _run_cmd(command):\n    proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    output = proc.stdout.read().decode(\"utf-8\")\n    return _parse_output(output)\n\ndef _parse_output(output): # for nases\n    # command = df -h /home\n    # output = Filesystem            Size  Used Avail Use% Mounted on\n    #           /dev/mapper/VGStor-lv_illumina\n    #                        24T   12T   13T  49% /srv/illumina\n\n    output = output.strip()\n    output = output.split()\n    try:\n        mounted_on = output[-1]\n        used_percentage = output[-2]\n        space_available = output[-3]\n        space_used = output[-4]\n        disk_size = output[-5]\n        filesystem = output[-6]\n\n        available_percentage = str(100 - int(used_percentage.replace('%',''))) + '%'\n\n        result = {\n            'disk_size': disk_size,\n            'space_used': space_used,\n            'space_available': space_available,\n            'used_percentage': used_percentage,\n            'available_percentage': available_percentage,\n            'mounted_on': mounted_on,\n            'filesystem': filesystem\n        }\n    except:\n        # Sometimes it fails for whatever reason as Popen returns not what it is supposed to\n        result = {\n            'disk_size': 'NaN',\n            'space_used': 'NaN',\n            'space_available': 'NaN',\n            'used_percentage': 'NaN',\n            'available_percentage': 'NaN',\n            'mounted_on': 'NaN',\n            'filesystem': 'NaN'\n        }\n        logging.error('Can not parse the output: {}'.format(output))\n\n    return result\n\ndef update_status_db(data, server_type=None):\n    \"\"\" Pushes the data to status db.\n\n    data can be from nases\n    server_type should be 'nas'.\n    \"\"\"\n    db_config = CONFIG.get('statusdb')\n    if db_config is None:\n        logging.error('\"statusdb\" must be present in the config file!')\n        raise RuntimeError('\"statusdb\" must be present in the config file!')\n    try:\n        couch_connection = statusdb.StatusdbSession(db_config).connection\n    except Exception as e:\n        logging.error(e.message)\n        raise\n\n    db = couch_connection['server_status']\n    logging.info('Connection established')\n    for key in data.keys(): # data is dict of dicts\n        server = data[key] # data[key] is dictionary (the command output)\n        server['name'] = key # key is nas url\n        # datetime.datetime(2015, 11, 18, 9, 54, 33, 473189) is not JSON serializable\n        server['time'] = datetime.datetime.now().isoformat()\n        server['server_type'] = server_type or 'unknown'\n\n        try:\n            db.save(server)\n        except Exception as e:\n            logging.error(e.message)\n            raise\n        else:\n            logging.info('{}: Server status has been updated'.format(key))\n\ndef check_promethion_status():\n    config = CONFIG.get('promethion_status')\n    server = config.get('server')\n    path = config.get('path')\n    command = config.get('command')\n    command_to_run = f'{command} {path}'\n    user = config.get('user')\n\n    try:\n        subprocess.run(['ssh', '-t', f'{user}@{server}', command_to_run], \n                       check=True)\n    except subprocess.CalledProcessError:\n        _send_promethion_warning_email()\n        return False\n    return True\n\ndef _send_promethion_warning_email():\n    email_recipients = CONFIG.get('mail').get('recipients')\n    email_subject = ('An issue with the PromethION has been detected.')\n    email_message = ('An issue with the PromethION has been detected. 
'\n                     'Please investigate and consider pausing the transfer cronjob on preproc1')\n    send_mail(email_subject, email_message, email_recipients)","sub_path":"taca/server_status/server_status.py","file_name":"server_status.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"317501679","text":"from docutils import nodes\nfrom sphinx.writers.html import HTMLTranslator\n\n\nclass BalancedHTMLTranslator(HTMLTranslator):\n\n    def visit_section(self, node):\n        ids = node.get('ids')\n        node['ids'] = []\n        HTMLTranslator.visit_section(self, node)\n        node['ids'] = ids\n\n    def visit_title(self, node):\n        if not isinstance(node.parent, nodes.section):\n            HTMLTranslator.visit_title(self, node)\n            return\n\n        h_level = self.section_level + self.initial_header_level - 1\n        atts = {}\n        if (len(node.parent) >= 2 and\n            isinstance(node.parent[1], nodes.subtitle)):\n            atts['CLASS'] = 'with-subtitle'\n        self.body.append(self.starttag(node, 'h%s' % h_level, '', **atts))\n        ids = node.parent.get('ids', [])\n        for id_ in ids:\n            self.body.append('<span id=\"%s\"></span>' % id_)\n        atts = {}\n        if node.hasattr('refid'):\n            atts['class'] = 'toc-backref'\n            atts['href'] = '#' + node['refid']\n        if atts:\n            self.body.append(self.starttag({}, 'a', '', **atts))\n            close_tag = '</a></h%s>\\n' % (h_level)\n        else:\n            close_tag = '</h%s>\\n' % (h_level)\n        self.context.append(close_tag)\n\n    def visit_container(self, node):\n        self.body.append(self.starttag(node, 'div', CLASS='sphinxcontainer'))\n","sub_path":"htmlwriter.py","file_name":"htmlwriter.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397562211","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler Problem 177\n=======================\n\nLet ABCD be a convex quadrilateral, with diagonals AC and BD. At each\nvertex the diagonal makes an angle with each of the two sides, creating\neight corner angles.\n\nFor example, at vertex A, the two angles are CAD, CAB.\n\nWe call such a quadrilateral for which all eight corner angles have\ninteger values when measured in degrees an \"integer angled quadrilateral\".\nAn example of an integer angled quadrilateral is a square, where all eight\ncorner angles are 45DEG. 
Another example is given by DAC = 20DEG, BAC =\n60DEG, ABD = 50DEG, CBD = 30DEG, BCA = 40DEG, DCA = 30DEG, CDB = 80DEG,\nADB = 50DEG.\n\nWhat is the total number of non-similar integer angled quadrilaterals?\n\nNote: In your calculations you may assume that a calculated angle is\nintegral if it is within a tolerance of 10^-9 of an integer value.\n\n\"\"\"\n\n\ndef main():\n return \"unimplemented\"\n\n\nif __name__ == \"__main__\":\n import ntpath\n import time\n from common.shared_functions import verify_solution\n\n problem_number = int(ntpath.basename(__file__).replace(\"euler\", \"\").replace(\".py\", \"\"))\n print(\"Retrieving my answer to Euler Problem {0} ...\".format(problem_number))\n\n ts = time.time()\n my_answer = main()\n te = time.time()\n\n print(\"My answer: {1}\".format(problem_number, my_answer))\n\n verification_type = verify_solution(problem_number, my_answer)\n print(\"Verification: {0}\".format(verification_type.name))\n print(\"Took {0} seconds.\".format(te - ts))\n","sub_path":"project-euler/solvers/euler177.py","file_name":"euler177.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456467331","text":"import os\nimport logging\nimport environ\nimport json\nimport html\nfrom paramiko import SSHClient\nfrom paramiko.client import AutoAddPolicy\nfrom scp import SCPClient\nfrom tempfile import TemporaryDirectory\nfrom datetime import date\n\nfrom django.core.files import File\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\nfrom django.conf import settings\n\nfrom .models import FileAttachment, Purchase, Item\nfrom users.models import Profile\nfrom events.models import Event\nfrom library.models import ZotItem\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef download_missing_files():\n local_dir = TemporaryDirectory()\n env = environ.Env()\n\n ssh = SSHClient()\n ssh.load_system_host_keys()\n ssh.connect('scholarium.at', username=env('SSH_USER'), password=env('SSH_PASSWORD'))\n\n scp = SCPClient(ssh.get_transport())\n for attachment in FileAttachment.objects.all():\n try:\n attachment.file.open()\n attachment.file.close()\n except FileNotFoundError:\n local_path = os.path.join(local_dir.name, os.path.split(attachment.file.name)[1])\n\n scp.get(os.path.join('~/scholarium_daten/', attachment.file.name), local_path=local_dir.name)\n with open(local_path, 'rb') as local_file:\n attachment.file = File(local_file, name=os.path.split(attachment.file.name)[1])\n attachment.save()\n logger.debug(f'Uploaded file {attachment.file.name}')\n scp.close()\n ssh.close()\n\n\ndef download_old_db():\n env = environ.Env()\n\n ssh = SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(AutoAddPolicy)\n ssh.connect('scholarium.at', username=env('SSH_USER'), password=env('SSH_PASSWORD'))\n\n scp = SCPClient(ssh.get_transport())\n scp.get(os.path.join('~/scholarium_staging/db.json'))\n logger.debug(f'Downloaded db.json')\n\n scp.close()\n ssh.close()\n\n\ndef import_from_json():\n database = json.load(open(settings.ROOT_DIR.path('db.json')))\n\n # Articles\n logger.info('Importing purchases...')\n\n purchases = [i for i in database if i['model'] == 'Produkte.kauf']\n books = {b['pk']: b for b in database if b['model'] == 'Scholien.buechlein'}\n\n for purchase in purchases:\n profile = Profile.objects.get(old_pk=purchase['fields']['nutzer'])\n model_name, obj_pk, item = str(purchase['fields']['produkt_pk']).split('+')\n if model_name == 
'veranstaltung':\n continue\n try:\n event = Event.objects.get(old_pk=obj_pk)\n except ObjectDoesNotExist:\n logger.error(f'Event with pk {obj_pk} not found')\n continue\n\n if item == 'aufzeichnung':\n type_slug = 'recording'\n elif item == 'teilnahme':\n type_slug = 'attendance'\n\n try:\n Purchase.objects.update_or_create(\n amount=purchase['fields']['menge'],\n profile=profile,\n item=event.product.item_set.get(type__slug=type_slug),\n date=date.fromisoformat(purchase['fields']['zeit'][:10]),\n executed=True\n )\n logger.debug('done')\n except Item.DoesNotExist:\n logger.error(f'Event {event} misses type {type_slug}.')\n else:\n print(item)\n elif model_name == 'buechlein':\n book = books[int(obj_pk)]\n title = html.unescape(book['fields']['bezeichnung'].split(' ')[-1])\n try:\n zotitem = ZotItem.objects.get(title=title)\n except ObjectDoesNotExist:\n logger.error(f'ZotItem {title} not found')\n continue\n except MultipleObjectsReturned:\n logger.error(f'Multiple Zotitems found for {title}')\n continue\n\n if item in ['pdf', 'epub', 'mobi']:\n try:\n Purchase.objects.update_or_create(\n amount=1,\n profile=profile,\n item=zotitem.product.item_set.get(type__slug=type_slug),\n date=date.fromisoformat(purchase['fields']['zeit'][:10]),\n executed=True\n )\n logger.debug(f'Zotitem purchase for {title} successfully imported')\n except ObjectDoesNotExist:\n logger.error(f'Item {title} as {item} not found')\n elif model_name != 'buch':\n logger.error(f\"Could not import {purchase['fields']['produkt_pk']}\")\n\n logger.info('Finished importing purchases')\n","sub_path":"scholariumat/products/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"509078226","text":"import Mix as mix\nfrom scipy import constants\n\n\ndef reload():\n # Important: put here all modules that you want to reload\n mix.reload_module(mix)\n\n\ndef bell_state(n_qubits):\n from qiskit import QuantumCircuit\n\n # Create measurement acting on the q register\n # In Qiskit: qubit ordering: |Qn, Qn-1, ... 
Q0>\n meas = QuantumCircuit(n_qubits, n_qubits)\n\n # Map the quantum measurement to the classical bits\n meas.measure(range(n_qubits), range(n_qubits))\n\n # Create quantum circuit (with only unitary gates)\n qc_circuit = QuantumCircuit(n_qubits)\n\n # Add a H gate on qubit 0\n qc_circuit.h(0)\n\n # Add CX (CNOT) gates on control qubit 0 and target id_qubit\n for id_qubit in range(1, n_qubits):\n qc_circuit.cx(0, id_qubit)\n\n # Full circuit\n full_circuit = qc_circuit + meas\n\n # # Print some properties of the circuits:\n # print('Quantum circuit (QCi) width is ', qc_circuit.width())\n # print('Full circuit (FCi) width is ', full_circuit.width())\n # print('FCi\\'s list of operations: ', full_circuit.count_ops())\n # print('Number of gates in QCi is ', qc_circuit.size())\n # print('QCi depth is ', qc_circuit.depth())\n # print('QCi unitary factors (number of independent subcircuits) is ', qc_circuit.num_unitary_factors())\n\n return full_circuit, qc_circuit","sub_path":"Circuits.py","file_name":"Circuits.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"517536558","text":"# -*- coding:utf-8 -*-\nfrom localPath import *\nimport os\nimport csv\nimport json\nfrom dateutil.parser import parse\nfrom DataAPI.utils import getJsonData, saveJsonDataToPath\nfrom APIS.multi_threads import multipleProcess\nfrom DataAPI.apis import getRawBikeDataFileList\ndateTimeMode = '%Y-%m-%d'\n\n\nstationIdOrderByBuildTime = getJsonData('stationIdOrderByBuildTime.json')\nstationIdList = stationIdOrderByBuildTime['stationID']\n\n# multiple threads\n# 1 distribute list\ndistributeList = stationIdList\n# 2 partition function\npartitionFunc = lambda dtList, i, n_job: [dtList[e] for e in range(len(dtList)) if e % n_job == i]\n# 3 n_jobs\nn_jobs = 12\n# 4 reduce function\ndef reduceFunction(a, b):\n for key, value in b.items():\n a[key] = value\n return a\n# 5 task function\ndef task(ShareQueue, Locker, distributedList, parameterList):\n csvFileNameList = getRawBikeDataFileList()\n finalResult = {}\n for csvFile in csvFileNameList:\n with open(os.path.join(rawBikeDataPath, csvFile), 'r') as f:\n print(csvFile)\n f_csv = csv.reader(f)\n headers = next(f_csv)\n for row in f_csv:\n startStationID = row[3]\n endStationID = row[7]\n if startStationID in distributedList:\n if startStationID not in finalResult:\n finalResult[startStationID] = {}\n startTime = row[1]\n startTimeData = parse(startTime)\n startTimeDateString = startTimeData.strftime(dateTimeMode)\n # 左闭右开\n startTimeMinute = startTimeData.hour * 60 + startTimeData.minute\n if startTimeDateString not in finalResult[startStationID]:\n finalResult[startStationID][startTimeDateString] = {'in': {},\n 'out': {},\n 'inStation': {},\n 'outStation': {}}\n if startTimeMinute not in finalResult[startStationID][startTimeDateString]['out']:\n finalResult[startStationID][startTimeDateString]['out'][startTimeMinute] = 0\n if startTimeMinute not in finalResult[startStationID][startTimeDateString]['outStation']:\n finalResult[startStationID][startTimeDateString]['outStation'][startTimeMinute] = []\n finalResult[startStationID][startTimeDateString]['out'][startTimeMinute] += 1\n finalResult[startStationID][startTimeDateString]['outStation'][startTimeMinute].append(endStationID)\n if endStationID in distributedList:\n if endStationID not in finalResult:\n finalResult[endStationID] = {}\n stopTime = row[2]\n stopTimeDate = parse(stopTime)\n stopTimeDateString = 
stopTimeDate.strftime(dateTimeMode)\n # 左闭右开\n stopTimeMinute = stopTimeDate.hour * 60 + stopTimeDate.minute\n if stopTimeDateString not in finalResult[endStationID]:\n finalResult[endStationID][stopTimeDateString] = {'in': {},\n 'out': {},\n 'inStation': {},\n 'outStation': {}}\n if stopTimeMinute not in finalResult[endStationID][stopTimeDateString]['in']:\n finalResult[endStationID][stopTimeDateString]['in'][stopTimeMinute] = 0\n if stopTimeMinute not in finalResult[endStationID][stopTimeDateString]['inStation']:\n finalResult[endStationID][stopTimeDateString]['inStation'][stopTimeMinute] = []\n finalResult[endStationID][stopTimeDateString]['in'][stopTimeMinute] += 1\n finalResult[endStationID][stopTimeDateString]['inStation'][stopTimeMinute].append(startStationID)\n print('Process Finish')\n Locker.acquire()\n ShareQueue.put(finalResult)\n Locker.release()\n# 6 parameter list\nparameterList = []\n# 7 run !\nif __name__ == '__main__':\n demandResult = multipleProcess(distributeList=distributeList, partitionDataFunc=partitionFunc, taskFunction=task,\n n_jobs=n_jobs, reduceFunction=reduceFunction, parameterList=parameterList)\n # save data\n for keys in demandResult:\n saveJsonDataToPath(demandResult[keys], os.path.join(demandMinDataPath, '%s.json' % keys))","sub_path":"NYC/PrepareData/ComputeMinDemand.py","file_name":"ComputeMinDemand.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"333804473","text":"import pika\nimport pickle\nimport json\nimport numpy as np\n\n# Загрузка регрессионной модели\nwith open('myfile.pkl', 'rb') as pkl_file:\n regressor = pickle.load(pkl_file)\n\ntry:\n # Соединение с сервером localhost\n connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n channel = connection.channel()\n\n # Создание очередей сообщений\n channel.queue_declare(queue='Features')\n channel.queue_declare(queue='y_pred')\n\n # Функция для отправки предсказанных ответов в очередь y_pred\n def callback(channel, method, property, body):\n print(f'Получен вектор признаков {body}')\n features_dict = json.loads(body)\n features = np.array(list(features_dict.values()))\n curr_id = int(list(features_dict.keys())[0])\n y_pred = regressor.predict(features)[0]\n channel.basic_publish(exchange='', routing_key='y_pred', body=json.dumps({curr_id: y_pred}))\n print(f'Сообщение с Id={curr_id} и предсказанием {y_pred} отправлено в очередь y_pred')\n\n\n # Ожидание сообщений из очереди queue_features\n channel.basic_consume(queue='Features', on_message_callback=callback, auto_ack=True)\n print('...Ожидание сообщений, для выхода нажмите CTRL+C')\n\n channel.start_consuming()\nexcept:\n print('Не удалось подключиться к очереди')\n","sub_path":"rasks_on_pycharm_skillfactory/rabbitmq_test_service/2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"79484634","text":"#!/usr/bin/python3\n\n#required libraries\nimport sys\nimport ssl\nimport paho.mqtt.client as mqtt\nimport json\n\nfrom time import sleep\nfrom sense_hat import SenseHat\nfrom datetime import datetime\nimport time\n\nsense = SenseHat()\nsense.clear()\nsense.show_message(\"Weather Station\")\nreportingFrequency = 300 #5 minutes\n\n#called while client tries to establish connection with the server \ndef on_connect(mqttc, obj, flags, rc):\n print(\"Subscriber Connection status code: \"+str(rc))\n 
mqttc.subscribe(\"station/message\",1)\n mqttc.subscribe(\"$aws/things/myRaspberryPi/shadow/update/#\",1)\n\n#called when a topic is successfully subscribed to\ndef on_subscribe(mqttc, obj, mid, granted_qos):\n print(\"Subscribed: \"+str(mid)+\" \"+str(granted_qos)+\"data\"+str(obj))\n\n#called when a message is received by a topic\ndef on_message(mqttc, obj, msg):\n print(\"Received message from topic: \"+msg.topic+\" | QoS: \"+str(msg.qos)+\" | Data Received: \"+str(msg.payload))\n message_json = json.loads(msg.payload.decode())\n sense.show_message(message_json['message'])\n\n#creating a client with client-id=myRaspberryPi\nmqttc = mqtt.Client(\"myRaspberryPi\")\nmqttc.on_connect = on_connect\nmqttc.on_subscribe = on_subscribe\nmqttc.on_message = on_message\n\n#Configure network encryption and authentication options. Enables SSL/TLS support.\n#adding client-side certificates and enabling tlsv1.2 support as required by aws-iot service\nmqttc.tls_set(\"/home/pi/python_mqtt/aws-iot-certs/aws-iot-rootCA.crt\",\n certfile=\"/home/pi/python_mqtt/aws-iot-certs/60b6bfb01e-certificate.pem.crt\",\n\t keyfile=\"/home/pi/python_mqtt/aws-iot-certs/60b6bfb01e-private.pem.key\",\n tls_version=ssl.PROTOCOL_TLSv1_2, \n ciphers=None)\n\n#connecting to aws-account-specific-iot-endpoint\nmqttc.connect(\"a1xugslbalqdxo.iot.us-east-1.amazonaws.com\", 8883, 60) #AWS IoT service hostname and portno\n\n\nshadowState = json.dumps({\n \"state\":{\n \"reported\": {\n \"message\": \"I am Raspberry\"\n }\n }\n })\n\n# update shadow status\nmqttc.publish(\"$aws/things/myRaspberryPi/shadow/update\", shadowState)\n\n# publish to a topic\n#mqttc.publish(\"testSDK/sub\", \"Hello AWS\")\n\n#automatically handles reconnecting\nlastValue = 0\nnextReport = time.time()-reportingFrequency\n\nwhile True:\n t = sense.get_temperature()\n p = sense.get_pressure()\n h = sense.get_humidity()\n\n t = round(t, 1)+32\n p = round(p, 1)\n h = round(h, 1)\n\n mqttc.loop()\n\n if (time.time()-nextReport > 0):\n lastValue = t\n nextReport = time.time()+reportingFrequency\n payload = json.dumps({\n \"weather\":{\n \"reported\": {\n \"temp\":str(t),\n \"pressure\":str(p),\n \"humidity\":str(h)\n },\n \"timestamp\": str(datetime.today())\n }\n })\n mqttc.publish(\"station/weather\", payload)\n print (\"Reporting: temp:\"+str(t)+\", pressure:\"+str(p)+\", humidity:\"+str(h))\n\n","sub_path":"python_scripts/iot_devices/iot-weather-station.py","file_name":"iot-weather-station.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"213580356","text":"import json\nfrom db import connect, disconnect, get_total_ingredient_counts, get_all_recipes\n\ndef getIngredientData():\n conn, cur = connect()\n\n ingredient_counts = get_total_ingredient_counts(conn)\n\n disconnect(conn, cur)\n\n return json.loads((json.dumps(ingredient_counts, indent=2)))\n\ndef getRecipeData():\n conn, cur = connect()\n recipes = get_all_recipes(conn)\n disconnect(conn, cur)\n return json.loads((json.dumps(recipes, indent=2)))\n\n\n\n","sub_path":"webscraper/dump_recipe_and_ingredient_data.py","file_name":"dump_recipe_and_ingredient_data.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"510042180","text":"import os\nimport json\nfrom tabulate import tabulate\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform\nfrom 
keras.layers import Dense, Dropout, Input, LSTM, Flatten\nfrom keras.constraints import maxnorm\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom stochnet.classes.TimeSeriesDataset import TimeSeriesDataset\nfrom stochnet.classes.NeuralNetworks import StochNeuralNetwork\nfrom stochnet.classes.TopLayers import MultivariateNormalCholeskyOutputLayer, MixtureOutputLayer\nfrom stochnet.utils.iterator import HDF5Iterator\n\n\ndef data():\n '''\n Data providing function:\n\n This function is separated from model() so that hyperopt\n won't reload data for each evaluation run.\n '''\n\n dataset_address = '/home/lpalmier/workspace/Data/SIR/SIR_dataset_timestep_2-5_06.hdf5'\n validation_dataset_address = '/home/lpalmier/workspace/Data/SIR/SIR_dataset_timestep_2-5_02.hdf5'\n\n nb_past_timesteps = 1\n\n dataset = TimeSeriesDataset(dataset_address=dataset_address, data_format='hdf5')\n validation_dataset = TimeSeriesDataset(dataset_address=validation_dataset_address, data_format='hdf5')\n\n filepath_for_saving_no_split = '/home/lpalmier/workspace/Data/SIR/SIR_dataset_timestep_2-5_06_no_split.hdf5'\n filepath_for_saving_w_split = '/home/lpalmier/workspace/Data/SIR/SIR_dataset_timestep_2-5_06_w_split.hdf5'\n dataset.format_dataset_for_ML(nb_past_timesteps=nb_past_timesteps, must_be_rescaled=True, positivity=None,\n percentage_of_test_data=0.0,\n filepath_for_saving_no_split=filepath_for_saving_no_split,\n filepath_for_saving_w_split=filepath_for_saving_w_split)\n\n filepath_for_saving_val_no_split = '/home/lpalmier/workspace/Data/SIR/SIR_dataset_timestep_2-5_02_no_split.hdf5'\n filepath_for_saving_val_w_split = '/home/lpalmier/workspace/Data/SIR/SIR_dataset_timestep_2-5_02_w_split.hdf5'\n validation_dataset.format_dataset_for_ML(nb_past_timesteps=nb_past_timesteps, must_be_rescaled=True, positivity=None,\n percentage_of_test_data=0.0,\n filepath_for_saving_no_split=filepath_for_saving_val_no_split,\n filepath_for_saving_w_split=filepath_for_saving_val_w_split)\n nb_features = dataset.nb_features\n\n training_filepath = filepath_for_saving_w_split\n validation_filepath = filepath_for_saving_val_w_split\n\n batch_size = 64\n X_train = HDF5Iterator(training_filepath, batch_size=batch_size,\n shuffle=True, X_label='X_train', y_label='y_train')\n X_test = HDF5Iterator(validation_filepath, batch_size=batch_size, shuffle=True, X_label='X_train', y_label='y_train')\n Y_train = None\n Y_test = None\n return X_train, X_test, Y_train, Y_test\n\n\ndef model(X_train, Y_train, X_test, Y_test):\n \"\"\"\n Model providing function:\n Create Keras model with double curly brackets dropped-in as needed.\n Return value has to be a valid python dictionary with two customary keys:\n - loss: Specify a numeric evaluation metric to be minimized\n - status: Just use STATUS_OK and see hyperopt documentation if not feasible\n The last one is optional, though recommended, namely:\n - model: specify the model just created so that we can later use it again.\n \"\"\"\n input_tensor = Input(shape=(1, 3))\n flatten1 = Flatten()(input_tensor)\n hidden1 = Dense({{choice([64, 128, 256, 512, 1024, 2048, 4096])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(flatten1)\n dropout1 = Dropout({{uniform(0.2, 0.7)}})(hidden1)\n dense2 = Dense({{choice([64, 128, 256, 512, 1024])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(dropout1)\n dropout2 = Dropout({{uniform(0.2, 0.7)}})(dense2)\n NN_body = Dense({{choice([64, 128, 256, 512, 1024, 2048])}}, kernel_constraint=maxnorm({{uniform(1, 3)}}))(dropout2)\n\n number_of_components = 2\n 
components = []\n for j in range(number_of_components):\n components.append(MultivariateNormalCholeskyOutputLayer(3))\n TopModel_obj = MixtureOutputLayer(components)\n\n NN = StochNeuralNetwork(input_tensor, NN_body, TopModel_obj)\n\n callbacks = []\n callbacks.append(EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='min'))\n result = NN.fit_generator(X_train, samples_per_epoch=10**5,\n epochs={{choice([3, 6, 9, 12])}},\n verbose=1,\n callbacks=callbacks,\n validation_generator=X_test)\n\n parameters = space\n val_loss = min(result.history['val_loss'])\n parameters[\"val_loss\"] = val_loss\n print('Validation loss: {0}'.format(val_loss))\n\n if 'results' not in globals():\n global results\n results = []\n\n results.append(parameters)\n print(tabulate(results, headers=\"keys\", tablefmt=\"fancy_grid\", floatfmt=\".8f\"))\n with open('/home/lpalmier/workspace/model_tuning/SIR/SIR_model_tuning_MNC_01.json', 'w') as f:\n f.write(json.dumps(results))\n return {'loss': val_loss, 'status': STATUS_OK, 'model': NN.model}\n\n\nif __name__ == '__main__':\n best_run, best_model = optim.minimize(model=model,\n data=data,\n algo=tpe.suggest,\n max_evals=20,\n trials=Trials())\n X_train, Y_train, X_test, Y_test = data()\n","sub_path":"model_selection_w_hdf5.py","file_name":"model_selection_w_hdf5.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"574008184","text":"import time\nimport os\nimport subprocess\nimport json \nfrom PIL import Image\n\nclass InternetModel:\n def __init__(self):\n self.upload_speed = 999\n self.download_speed = 999\n self.ping = 999\n\n def get_ip_address(self):\n ip = subprocess.check_output(\"hostname -I | grep -Eo '^[^ ]+'\", shell=True)\n stripped = ip.rstrip()\n return stripped.decode('utf-8')\n\n def get_wifi_name(self):\n try:\n hostname = subprocess.check_output(\"iwgetid -r\", shell=True)\n stripped = hostname.rstrip()\n return stripped.decode('utf-8')\n except subprocess.CalledProcessError:\n return \"Unknown\"\n\n def get_available_disk_space(self):\n disk_space = subprocess.check_output('df | awk \\'$1 == \"/dev/root\" { print $4 }\\'', shell=True)\n stripped = disk_space.rstrip()\n decoded = stripped.decode('utf-8')\n\n value = round((int(decoded) / 1000000), 2)\n \n return str(value)\n\n def get_total_ram(self):\n ram = subprocess.check_output('free -m | awk \\'$1 == \"Mem:\" { print $2 }\\'', shell=True)\n stripped = ram.rstrip()\n decoded = stripped.decode('utf-8')\n\n value = round((int(decoded)), 2)\n \n return str(value)\n\n def get_used_ram(self):\n ram = subprocess.check_output('free -m | awk \\'$1 == \"Mem:\" { print $3 }\\'', shell=True)\n stripped = ram.rstrip()\n decoded = stripped.decode('utf-8')\n\n value = round((int(decoded)), 2)\n \n return str(value)\n\n def shutdown(self):\n subprocess.Popen([\"shutdown\", \"now\"])\n\n def restart(self):\n subprocess.Popen([\"reboot\", \"now\"])\n\n\n def get_total_disk_space(self):\n disk_space = subprocess.check_output('df | awk \\'$1 == \"/dev/root\" { print $2 }\\'', shell=True)\n stripped = disk_space.rstrip()\n decoded = stripped.decode('utf-8')\n\n value = round((int(decoded) / 1000000), 2)\n \n return str(value)\n\n\n def execute_speed_test(self):\n data = subprocess.check_output('speedtest --simple', shell=True).decode(\"utf-8\")\n\n output = []\n\n splitted = data.split()\n for split in splitted:\n output.append(split)\n \n try:\n self.upload_speed = output[7]\n self.download_speed = 
output[4]\n self.ping = output[1]\n except IndexError:\n pass\n\n def get_logged_in_users(self):\n users = subprocess.check_output(\"last | grep 'still logged in' | wc -l\", shell = True)\n stripped = users.rstrip()\n decoded = stripped.decode('utf-8')\n\n return str(decoded)\n\n\n def get_apod(self):\n os.system(\"curl https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY > apod.txt\")\n\n def get_apod_image(self):\n image = subprocess.check_output('cat apod.txt', shell=True).decode(\"utf-8\")\n data = json.loads(image)\n\n os.system(\"curl \" + data[\"url\"] + \" > apod.jpg\")\n\n basewidth = 776\n img = Image.open('apod.jpg')\n wpercent = (basewidth/float(img.size[0]))\n hsize = int((float(img.size[1])*float(wpercent)))\n img = img.resize((basewidth,hsize), Image.ANTIALIAS)\n img.save('apod.jpg') \n\n def get_apod_text(self):\n image = subprocess.check_output('cat apod.txt', shell=True).decode(\"utf-8\")\n data = json.loads(image)\n return data[\"title\"]\n\n def get_apod_explanation(self):\n image = subprocess.check_output('cat apod.txt', shell=True).decode(\"utf-8\")\n data = json.loads(image)\n return data[\"explanation\"]\n","sub_path":"bin/models/internetmodel.py","file_name":"internetmodel.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"526418793","text":"# Microsoft Azure Linux Agent\n#\n# Copyright 2018 Microsoft Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Requires Python 2.6+ and Openssl 1.0+\n#\n\nimport base64\nimport datetime\nimport json\nimport uuid\n\nfrom azurelinuxagent.common import logger\nfrom azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_HOST_PLUGIN_FAILURE\nfrom azurelinuxagent.common.event import WALAEventOperation, add_event\nfrom azurelinuxagent.common.exception import HttpError, ProtocolError\nfrom azurelinuxagent.common.future import ustr\nfrom azurelinuxagent.common.protocol.healthservice import HealthService\nfrom azurelinuxagent.common.utils import restutil\nfrom azurelinuxagent.common.utils import textutil\nfrom azurelinuxagent.common.utils.textutil import remove_bom\nfrom azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, PY_VERSION_MAJOR\n\nHOST_PLUGIN_PORT = 32526\n\nURI_FORMAT_GET_API_VERSIONS = \"http://{0}:{1}/versions\"\nURI_FORMAT_VM_SETTINGS = \"http://{0}:{1}/vmSettings\"\nURI_FORMAT_GET_EXTENSION_ARTIFACT = \"http://{0}:{1}/extensionArtifact\"\nURI_FORMAT_PUT_VM_STATUS = \"http://{0}:{1}/status\"\nURI_FORMAT_PUT_LOG = \"http://{0}:{1}/vmAgentLog\"\nURI_FORMAT_HEALTH = \"http://{0}:{1}/health\"\n\nAPI_VERSION = \"2015-09-01\"\n\n_HEADER_CLIENT_NAME = \"x-ms-client-name\"\n_HEADER_CLIENT_VERSION = \"x-ms-client-version\"\n_HEADER_CORRELATION_ID = \"x-ms-client-correlationid\"\n_HEADER_CONTAINER_ID = \"x-ms-containerid\"\n_HEADER_DEPLOYMENT_ID = \"x-ms-vmagentlog-deploymentid\"\n_HEADER_VERSION = \"x-ms-version\"\n_HEADER_HOST_CONFIG_NAME = \"x-ms-host-config-name\"\n_HEADER_ARTIFACT_LOCATION = 
\"x-ms-artifact-location\"\n_HEADER_ARTIFACT_MANIFEST_LOCATION = \"x-ms-artifact-manifest-location\"\n\nMAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024 # Max page size: 4MB\n\n\nclass HostPluginProtocol(object):\n is_default_channel = False\n\n FETCH_REPORTING_PERIOD = datetime.timedelta(minutes=1)\n STATUS_REPORTING_PERIOD = datetime.timedelta(minutes=1)\n\n def __init__(self, endpoint, container_id, role_config_name):\n if endpoint is None:\n raise ProtocolError(\"HostGAPlugin: Endpoint not provided\")\n self.is_initialized = False\n self.is_available = False\n self.api_versions = None\n self.endpoint = endpoint\n self.container_id = container_id\n self.deployment_id = self._extract_deployment_id(role_config_name)\n self.role_config_name = role_config_name\n self.manifest_uri = None\n self.health_service = HealthService(endpoint)\n self.fetch_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE)\n self.status_error_state = ErrorState(min_timedelta=ERROR_STATE_HOST_PLUGIN_FAILURE)\n self.fetch_last_timestamp = None\n self.status_last_timestamp = None\n\n @staticmethod\n def _extract_deployment_id(role_config_name):\n # Role config name consists of: .(...)\n return role_config_name.split(\".\")[0] if role_config_name is not None else None\n\n def update_container_id(self, new_container_id):\n self.container_id = new_container_id\n\n def update_role_config_name(self, new_role_config_name):\n self.role_config_name = new_role_config_name\n self.deployment_id = self._extract_deployment_id(new_role_config_name)\n\n def update_manifest_uri(self, new_manifest_uri):\n self.manifest_uri = new_manifest_uri\n\n def ensure_initialized(self):\n if not self.is_initialized:\n self.api_versions = self.get_api_versions()\n self.is_available = API_VERSION in self.api_versions\n self.is_initialized = self.is_available\n add_event(op=WALAEventOperation.InitializeHostPlugin,\n is_success=self.is_available)\n return self.is_available\n\n def get_health(self):\n \"\"\"\n Call the /health endpoint\n :return: True if 200 received, False otherwise\n \"\"\"\n url = URI_FORMAT_HEALTH.format(self.endpoint,\n HOST_PLUGIN_PORT)\n logger.verbose(\"HostGAPlugin: Getting health from [{0}]\", url)\n response = restutil.http_get(url, max_retry=1)\n return restutil.request_succeeded(response)\n\n def get_api_versions(self):\n url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint,\n HOST_PLUGIN_PORT)\n logger.verbose(\"HostGAPlugin: Getting API versions at [{0}]\"\n .format(url))\n return_val = []\n error_response = ''\n is_healthy = False\n try:\n headers = {_HEADER_CONTAINER_ID: self.container_id}\n response = restutil.http_get(url, headers)\n\n if restutil.request_failed(response):\n error_response = restutil.read_response_error(response)\n logger.error(\"HostGAPlugin: Failed Get API versions: {0}\".format(error_response))\n is_healthy = not restutil.request_failed_at_hostplugin(response)\n else:\n return_val = ustr(remove_bom(response.read()), encoding='utf-8')\n is_healthy = True\n except HttpError as e:\n logger.error(\"HostGAPlugin: Exception Get API versions: {0}\".format(e))\n\n self.health_service.report_host_plugin_versions(is_healthy=is_healthy, response=error_response)\n\n return return_val\n\n def get_vm_settings_request(self, correlation_id):\n url = URI_FORMAT_VM_SETTINGS.format(self.endpoint, HOST_PLUGIN_PORT)\n\n headers = {\n _HEADER_VERSION: API_VERSION,\n _HEADER_CONTAINER_ID: self.container_id,\n _HEADER_HOST_CONFIG_NAME: self.role_config_name,\n _HEADER_CORRELATION_ID: correlation_id\n 
}\n\n return url, headers\n\n def get_artifact_request(self, artifact_url, artifact_manifest_url=None):\n if not self.ensure_initialized():\n raise ProtocolError(\"HostGAPlugin: Host plugin channel is not available\")\n\n if textutil.is_str_none_or_whitespace(artifact_url):\n raise ProtocolError(\"HostGAPlugin: No extension artifact url was provided\")\n\n url = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint,\n HOST_PLUGIN_PORT)\n headers = {_HEADER_VERSION: API_VERSION,\n _HEADER_CONTAINER_ID: self.container_id,\n _HEADER_HOST_CONFIG_NAME: self.role_config_name,\n _HEADER_ARTIFACT_LOCATION: artifact_url}\n\n if artifact_manifest_url is not None:\n headers[_HEADER_ARTIFACT_MANIFEST_LOCATION] = artifact_manifest_url\n\n return url, headers\n\n def report_fetch_health(self, uri, is_healthy=True, source='', response=''):\n\n if uri != URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT):\n return\n\n if self.should_report(is_healthy,\n self.fetch_error_state,\n self.fetch_last_timestamp,\n HostPluginProtocol.FETCH_REPORTING_PERIOD):\n self.fetch_last_timestamp = datetime.datetime.utcnow()\n health_signal = self.fetch_error_state.is_triggered() is False\n self.health_service.report_host_plugin_extension_artifact(is_healthy=health_signal,\n source=source,\n response=response)\n\n def report_status_health(self, is_healthy, response=''):\n if self.should_report(is_healthy,\n self.status_error_state,\n self.status_last_timestamp,\n HostPluginProtocol.STATUS_REPORTING_PERIOD):\n self.status_last_timestamp = datetime.datetime.utcnow()\n health_signal = self.status_error_state.is_triggered() is False\n self.health_service.report_host_plugin_status(is_healthy=health_signal,\n response=response)\n\n @staticmethod\n def should_report(is_healthy, error_state, last_timestamp, period):\n \"\"\"\n Determine whether a health signal should be reported\n :param is_healthy: whether the current measurement is healthy\n :param error_state: the error state which is tracking time since failure\n :param last_timestamp: the last measurement time stamp\n :param period: the reporting period\n :return: True if the signal should be reported, False otherwise\n \"\"\"\n\n if is_healthy:\n # we only reset the error state upon success, since we want to keep\n # reporting the failure; this is different to other uses of error states\n # which do not have a separate periodicity\n error_state.reset()\n else:\n error_state.incr()\n\n if last_timestamp is None:\n last_timestamp = datetime.datetime.utcnow() - period\n\n return datetime.datetime.utcnow() >= (last_timestamp + period)\n\n def put_vm_log(self, content):\n \"\"\"\n Try to upload VM logs, a compressed zip file, via the host plugin /vmAgentLog channel.\n :param content: the binary content of the zip file to upload\n \"\"\"\n if not self.ensure_initialized():\n raise ProtocolError(\"HostGAPlugin: HostGAPlugin is not available\")\n\n if content is None:\n raise ProtocolError(\"HostGAPlugin: Invalid argument passed to upload VM logs. 
Content was not provided.\")\n\n url = URI_FORMAT_PUT_LOG.format(self.endpoint, HOST_PLUGIN_PORT)\n response = restutil.http_put(url,\n data=content,\n headers=self._build_log_headers(),\n redact_data=True)\n\n if restutil.request_failed(response):\n error_response = restutil.read_response_error(response)\n raise HttpError(\"HostGAPlugin: Upload VM logs failed: {0}\".format(error_response))\n\n return response\n\n def put_vm_status(self, status_blob, sas_url, config_blob_type=None):\n \"\"\"\n Try to upload the VM status via the host plugin /status channel\n :param sas_url: the blob SAS url to pass to the host plugin\n :param config_blob_type: the blob type from the extension config\n :type status_blob: StatusBlob\n \"\"\"\n if not self.ensure_initialized():\n raise ProtocolError(\"HostGAPlugin: HostGAPlugin is not available\")\n\n if status_blob is None or status_blob.vm_status is None:\n raise ProtocolError(\"HostGAPlugin: Status blob was not provided\")\n\n logger.verbose(\"HostGAPlugin: Posting VM status\")\n blob_type = status_blob.type if status_blob.type else config_blob_type\n\n if blob_type == \"BlockBlob\":\n self._put_block_blob_status(sas_url, status_blob)\n else:\n self._put_page_blob_status(sas_url, status_blob)\n\n def _put_block_blob_status(self, sas_url, status_blob):\n url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT)\n\n response = restutil.http_put(url,\n data=self._build_status_data(\n sas_url,\n status_blob.get_block_blob_headers(len(status_blob.data)),\n bytearray(status_blob.data, encoding='utf-8')),\n headers=self._build_status_headers())\n\n if restutil.request_failed(response):\n error_response = restutil.read_response_error(response)\n is_healthy = not restutil.request_failed_at_hostplugin(response)\n self.report_status_health(is_healthy=is_healthy, response=error_response)\n raise HttpError(\"HostGAPlugin: Put BlockBlob failed: {0}\"\n .format(error_response))\n else:\n self.report_status_health(is_healthy=True)\n logger.verbose(\"HostGAPlugin: Put BlockBlob status succeeded\")\n\n def _put_page_blob_status(self, sas_url, status_blob):\n url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT)\n\n # Convert the status into a blank-padded string whose length is modulo 512\n status = bytearray(status_blob.data, encoding='utf-8')\n status_size = int((len(status) + 511) / 512) * 512\n status = bytearray(status_blob.data.ljust(status_size), encoding='utf-8')\n\n # First, initialize an empty blob\n response = restutil.http_put(url,\n data=self._build_status_data(\n sas_url,\n status_blob.get_page_blob_create_headers(status_size)),\n headers=self._build_status_headers())\n\n if restutil.request_failed(response):\n error_response = restutil.read_response_error(response)\n is_healthy = not restutil.request_failed_at_hostplugin(response)\n self.report_status_health(is_healthy=is_healthy, response=error_response)\n raise HttpError(\"HostGAPlugin: Failed PageBlob clean-up: {0}\"\n .format(error_response))\n else:\n self.report_status_health(is_healthy=True)\n logger.verbose(\"HostGAPlugin: PageBlob clean-up succeeded\")\n \n # Then, upload the blob in pages\n if sas_url.count(\"?\") <= 0:\n sas_url = \"{0}?comp=page\".format(sas_url)\n else:\n sas_url = \"{0}&comp=page\".format(sas_url)\n\n start = 0\n end = 0\n while start < len(status):\n # Create the next page\n end = start + min(len(status) - start, MAXIMUM_PAGEBLOB_PAGE_SIZE)\n page_size = int((end - start + 511) / 512) * 512\n buf = bytearray(page_size)\n buf[0: end - start] = 
status[start: end]\n\n # Send the page\n response = restutil.http_put(url,\n data=self._build_status_data(\n sas_url,\n status_blob.get_page_blob_page_headers(start, end),\n buf),\n headers=self._build_status_headers())\n\n if restutil.request_failed(response):\n error_response = restutil.read_response_error(response)\n is_healthy = not restutil.request_failed_at_hostplugin(response)\n self.report_status_health(is_healthy=is_healthy, response=error_response)\n raise HttpError(\n \"HostGAPlugin Error: Put PageBlob bytes \"\n \"[{0},{1}]: {2}\".format(start, end, error_response))\n\n # Advance to the next page (if any)\n start = end\n \n def _build_status_data(self, sas_url, blob_headers, content=None):\n headers = []\n for name in iter(blob_headers.keys()):\n headers.append({\n 'headerName': name,\n 'headerValue': blob_headers[name]\n })\n\n data = {\n 'requestUri': sas_url,\n 'headers': headers\n }\n if not content is None:\n data['content'] = self._base64_encode(content)\n return json.dumps(data, sort_keys=True)\n \n def _build_status_headers(self):\n return {\n _HEADER_VERSION: API_VERSION,\n \"Content-type\": \"application/json\",\n _HEADER_CONTAINER_ID: self.container_id,\n _HEADER_HOST_CONFIG_NAME: self.role_config_name\n }\n\n def _build_log_headers(self):\n return {\n _HEADER_VERSION: API_VERSION,\n _HEADER_CONTAINER_ID: self.container_id,\n _HEADER_DEPLOYMENT_ID: self.deployment_id,\n _HEADER_CLIENT_NAME: AGENT_NAME,\n _HEADER_CLIENT_VERSION: AGENT_VERSION,\n _HEADER_CORRELATION_ID: str(uuid.uuid4())\n }\n\n def _base64_encode(self, data):\n s = base64.b64encode(bytes(data))\n if PY_VERSION_MAJOR > 2:\n return s.decode('utf-8')\n return s\n","sub_path":"azurelinuxagent/common/protocol/hostplugin.py","file_name":"hostplugin.py","file_ext":"py","file_size_in_byte":16816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"609861783","text":"from fastapi import APIRouter\n\nfrom ..factory import app\n\nrouter = APIRouter()\n\n\n@router.get(\n '/home',\n)\nasync def home():\n return {\n 'title': \"Welcome to REST ADMIN\"\n }\n\n\n@router.get(\n '/site',\n)\nasync def site():\n return app.site.dict(by_alias=True, exclude_unset=True)\n","sub_path":"fastapi_admin/routes/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"161018959","text":"\"\"\"\ngame.py - Test the game module\n\nAugust 2019, Lewis Gaul\n\nUses pytest - simply run 'python -m pytest tests/ [-k game_test]' from\nthe root directory.\n\"\"\"\n\nfrom unittest.mock import Mock\n\nfrom minegauler.core.game import Game, ignore_if, ignore_if_not\nfrom minegauler.types import *\n\n\ndef test_ignore_if_decorators():\n \"\"\"\n Test the 'ignore if' and 'ignore if not' decorators, since they aren't\n fully used in the code.\n \"\"\"\n game = Game(x_size=4, y_size=5, mines=5)\n mock_func = Mock()\n\n # First test 'ignore if'.\n # Test with one ignored cell state (flagged).\n decorated_mock = ignore_if(cell_state=CellFlag)(mock_func)\n decorated_mock(game, (0, 0)) # unclicked\n mock_func.assert_called_once()\n mock_func.reset_mock()\n\n game.set_cell_flags((0, 0), 1)\n decorated_mock(game, (0, 0)) # flagged\n mock_func.assert_not_called()\n\n # Test with multiple ignored cell states.\n decorated_mock = ignore_if(cell_state=(CellFlag, CellUnclicked))(mock_func)\n decorated_mock(game, (0, 0)) # flagged\n mock_func.assert_not_called()\n\n decorated_mock(game, (0, 
1)) # unclicked\n mock_func.assert_not_called()\n\n # Next test 'ignore if not'.\n mock_func.reset_mock()\n # Test with one game state (READY).\n decorated_mock = ignore_if_not(game_state=GameState.READY)(mock_func)\n game.state = GameState.READY\n decorated_mock(game)\n mock_func.assert_called_once()\n mock_func.reset_mock()\n\n game.state = GameState.ACTIVE\n decorated_mock(game)\n mock_func.assert_not_called()\n\n # Test with multiple ignored cell states.\n decorated_mock = ignore_if_not(game_state=(GameState.READY, GameState.ACTIVE))(\n mock_func\n )\n game.state = GameState.READY\n decorated_mock(game)\n mock_func.assert_called_once()\n mock_func.reset_mock()\n\n game.state = GameState.ACTIVE\n decorated_mock(game)\n mock_func.assert_called_once()\n mock_func.reset_mock()\n\n game.state = GameState.LOST\n decorated_mock(game)\n mock_func.assert_not_called()\n","sub_path":"tests/mut/core/game_test.py","file_name":"game_test.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"580160771","text":"import numpy as np\nimport utils as Util\n\n\nclass DecisionTree():\n def __init__(self):\n self.clf_name = \"DecisionTree\"\n self.root_node = None\n\n def train(self, features, labels):\n # features: List[List[float]], labels: List[int]\n # init\n assert (len(features) > 0)\n num_cls = np.unique(labels).size\n\n # build the tree\n self.root_node = TreeNode(features, labels, num_cls)\n if self.root_node.splittable:\n self.root_node.split()\n return\n\n def predict(self, features):\n # features: List[List[any]]\n # return List[int]\n y_pred = []\n for idx, feature in enumerate(features):\n pred = self.root_node.predict(feature)\n y_pred.append(pred)\n return y_pred\n\n\nclass TreeNode(object):\n def __init__(self, features, labels, num_cls):\n # features: List[List[any]], labels: List[int], num_cls: int\n self.features = features\n self.labels = labels\n self.children = []\n self.num_cls = num_cls\n self.parent = None\n self.prune_accuracy = 0\n # find the most common labels in current node\n count_max = 0\n for label in np.unique(labels):\n if self.labels.count(label) > count_max:\n count_max = labels.count(label)\n self.cls_max = label\n # splitable is false when all features belongs to one class\n if len(np.unique(labels)) < 2:\n self.splittable = False\n else:\n self.splittable = True\n\n self.dim_split = None # the index of the feature to be split\n\n self.feature_uniq_split = None # the possible unique values of the feature to be split\n\n def split(self):\n features = np.array(self.features)\n number_of_attributes = features[0].size\n\n if(number_of_attributes != 0):\n\n max_info_gain = -1\n max_unique_values = 0\n max_attr_number = 0\n\n #Entropy for root:\n root_branch = []\n root_label_count = []\n for label in np.unique(self.labels):\n root_label_count.append(self.labels.count(label))\n root_branch.append(root_label_count)\n entropy_root = Util.Information_Gain(0, root_branch)\n entropy_root *= -1\n\n #Split according to attributes\n for attr_number in range(number_of_attributes):\n unique_values = np.unique(features[:,attr_number])\n splits = unique_values.size\n branches = []\n children = []\n\n np.sort(unique_values)\n for unique_value in unique_values:\n branch = []\n branch_feat = []\n branch_label = []\n branch_label_count = []\n zero_label = 0\n one_label = 0\n for pos, feature in enumerate(features):\n if(feature[attr_number] == unique_value):\n 
branch_feat.append(np.delete(feature, attr_number))\n branch_label.append(self.labels[pos])\n\n for label in np.unique(self.labels):\n branch_label_count.append(branch_label.count(label))\n\n branches.append(branch_label_count)\n child = TreeNode(branch_feat, branch_label, np.unique(branch_label).size)\n child.parent = self\n children.append(child)\n\n # Check Information Gain for each attribute\n info_gain = Util.Information_Gain(entropy_root, branches)\n\n if(info_gain > max_info_gain):\n max_info_gain = info_gain\n max_unique_values = unique_values.size\n max_attr_number = attr_number\n self.children = children\n self.dim_split = attr_number\n self.feature_uniq_split = unique_values\n\n elif(info_gain == max_info_gain):\n if(unique_values.size > max_unique_values):\n max_info_gain = info_gain\n max_unique_values = unique_values.size\n max_attr_number = attr_number\n self.children = children\n self.dim_split = attr_number\n self.feature_uniq_split = unique_values\n\n elif(unique_values.size == max_unique_values):\n if(attr_number < max_attr_number):\n max_info_gain = info_gain\n max_unique_values = unique_values.size\n max_attr_number = attr_number\n self.children = children\n self.dim_split = attr_number\n self.feature_uniq_split = unique_values\n if(max_info_gain == 0.0):\n self.children = []\n self.dim_split = None\n self.feature_uniq_split = None\n self.splittable = False\n return\n\n for child in self.children:\n if(child.splittable is True):\n child.split()\n else:\n self.splittable = False\n\n\n def predict(self, feature):\n # feature: List[any]\n # return: int\n if self.splittable:\n child_pos = np.where(self.feature_uniq_split == feature[self.dim_split])\n return self.children[child_pos[0].tolist()[0]].predict(np.delete(feature, self.dim_split))\n else:\n return self.cls_max\n","sub_path":"Decision Trees/dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533685208","text":"\"\"\"\nDisplay one markers layer ontop of one image layer using the add_markers and\nadd_image APIs\n\"\"\"\n\nimport numpy as np\nfrom skimage import data\nfrom skimage.color import rgb2gray\nfrom napari import ViewerApp\nfrom napari.util import app_context\n\n\nwith app_context():\n # create the viewer and window\n viewer = ViewerApp()\n\n # add the image\n viewer.add_image(rgb2gray(data.astronaut()))\n # add the markers\n markers = np.array([[100, 100], [200, 200], [333, 111]])\n size = np.array([10, 20, 20])\n viewer.add_markers(markers, size=size)\n\n # unselect the image layer\n viewer.layers[0].selected = False\n\n # adjust some of the marker layer properties\n layer = viewer.layers[1]\n\n # change the layer name\n layer.name = 'spots'\n\n # change the layer visibility\n layer.visible = False\n layer.visible = True\n\n # change the layer selection\n layer.selected = False\n layer.selected = True\n\n # set the layer property widget to be expanded\n layer._qt_properties.setExpanded(True)\n\n # change the layer opacity\n layer.opacity = 0.9\n\n # change the layer blending mode\n layer.blending = 'opaque'\n layer.blending = 'translucent'\n\n # change the layer marker face color\n layer.face_color = 'white'\n\n # change the layer marker edge color\n layer.edge_color = 'blue'\n\n # change the layer marker symbol using an alias\n layer.symbol = '+'\n\n # change the layer marker n_dimensional status\n layer.n_dimensional = True\n\n # change the layer marker size\n layer.size = 20\n\n # 
change the layer mode\n layer.mode = 'add'\n","sub_path":"examples/add_markers.py","file_name":"add_markers.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118298965","text":"\"\"\" Usually when you buy something,\n you're asked whether your credit\n card number, phone number or answer\n to your most secret question is still correct.\n However, since someone could look over your\n shoulder, you don't want that shown on your\n screen. Instead, we mask it.\n Your task is to write a function maskify,\n which changes all but the last four characters\n into '#'.\"\"\"\n# variant # 1\n#def maskify(cc):\n# s = \"\"\n# if len(cc) > 4:\n# for k in range(len(cc)):\n# if k < (len(cc)-4):\n# s += \"#\"\n# else:\n# s += cc[k]\n# return s\n# else:\n# return cc\n\ndef maskify(cc):\n l = len(cc)\n if len(cc) < 4 :\n return cc\n else:\n return (l-4)*\"#\"+cc[l-4:]\n\ndef test_maskify():\n s = \"4556364607935616\"\n s_result = \"############5616\"\n print(\"Test 1 is Ok \" if maskify(s) == s_result else \"Test 1 is Fail\")\n\n s = \"64607935616\"\n s_result = \"#######5616\"\n print(\"Test 2 is Ok \" if maskify(s) == s_result else \"Test 2 is Fail\")\n\n s = \"1\"\n s_result = \"1\"\n print(\"Test 3 is Ok \" if maskify(s) == s_result else \"Test 3 is Fail\")\n\n s = \"\"\n s_result = \"\"\n print(\"Test 4 is Ok \" if maskify(s) == s_result else \"Test 4 is Fail\")\n\ntest_maskify()","sub_path":"codewars.com/codewars_train_4.py","file_name":"codewars_train_4.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"454502102","text":"import numpy as np\nimport ctypes\nimport sys\nsys.path.append('/home/hungjinh/c_lib/cosmolike2/des_real/code_PCA/')\nfrom cosmolike_lib import *\n\nclass model_xi():\n \n def __init__(self):\n self.arcmin2radian=2.90888208665721580e-4\n self.theta(Ntheta=20,theta_min_arcmin=2.5,theta_max_arcmin=250)\n self.tomo_binning(Ntomo_source=4,Ntomo_lens=5)\n self.gen_xi_bin_id_order()\n\n \n def theta(self,Ntheta=20,theta_min_arcmin=2.5,theta_max_arcmin=250):\n self.Ntheta=Ntheta\n vtmin = theta_min_arcmin*self.arcmin2radian\n vtmax = theta_max_arcmin*self.arcmin2radian\n logdt = (np.log(vtmax)-np.log(vtmin))/self.Ntheta\n \n thetamin = np.ndarray(self.Ntheta)\n thetamax = np.ndarray(self.Ntheta)\n self.theta = np.ndarray(self.Ntheta)\n \n for j in range(self.Ntheta):\n thetamin[j] = np.exp(np.log(vtmin)+(j+0.0)*logdt)\n thetamax[j] = np.exp(np.log(vtmin)+(j+1.0)*logdt)\n self.theta[j] = 2./3.*(thetamax[j]**3-thetamin[j]**3)/(thetamax[j]**2-thetamin[j]**2)\n \n self.theta_arcmin=self.theta/self.arcmin2radian\n\n \n def tomo_binning(self,Ntomo_source=4,Ntomo_lens=5):\n self.Ntomo_source=Ntomo_source\n self.Ntomo_lens=Ntomo_lens\n self.clustering_Npowerspectra = self.Ntomo_lens # 5\n self.ggl_Npowerspectra = self.Ntomo_lens*self.Ntomo_source # 20\n self.shear_Npowerspectra = self.Ntomo_source*(self.Ntomo_source+1)/2 # 10\n self.total_Npowerspectra = 2*self.shear_Npowerspectra + self.ggl_Npowerspectra + self.clustering_Npowerspectra # 45\n self.Ndata_shear = self.Ntheta*self.shear_Npowerspectra\n self.Ndata_ggl = self.Ntheta*self.ggl_Npowerspectra \n self.Ndata_clustering = self.Ntheta*self.clustering_Npowerspectra\n self.Ndata = self.total_Npowerspectra*self.Ntheta\n\n \n def gen_xi_bin_id_order(self):\n\n # shear-shear-p or m\n self.z1_id=[]\n for j in range(self.Ntomo_source):\n 
temp=[j]*(self.Ntomo_source-j)\n self.z1_id=self.z1_id+temp\n\n self.z2_id=[]\n for j in range(self.Ntomo_source):\n temp = range(j,self.Ntomo_source)\n self.z2_id=self.z2_id+temp\n\n \n def init_cosmolike(self):\n \n init_cosmo_runmode(\"emu\")\n init_bary(\"dmo\")\n init_source_sample_mpp(\"/home/hungjinh/c_lib/cosmolike2/des_real/cov_Y1/source.nz\",self.Ntomo_source) \n \n b1_ptr = self.make_double_ptr([1.45, 1.55, 1.65, 1.8, 2.0]) \n b2_ptr = self.make_double_ptr([0. , 0. , 0. , 0. , 0. ]) \n init_lens_sample_mpp(\"/home/hungjinh/c_lib/cosmolike2/des_real/cov_Y1/lens.nz\",self.Ntomo_lens,b1_ptr,b2_ptr,0.1) \n \n init_IA_mpp(4)\n init_priors(\"none\",\"none\",\"none\",\"none\")\n init_binning_mpp(20,2.5,250.)\n init_probes_real_mpp(\"3x2pt\")\n \n mean_m_ptr = self.make_double_ptr(shear_m_mean+[0]*6)\n sigma_m_ptr = self.make_double_ptr(shear_m_sigma+[0]*6)\n set_shear_priors_mpp(mean_m_ptr,sigma_m_ptr)\n \n bias_photoz_s_ptr = self.make_double_ptr(source_z_bias_mean+[0]*6)\n sigma_b_photoz_s_ptr = self.make_double_ptr(source_z_bias_sigma+[0.08]*6)\n set_wlphotoz_priors_mpp(bias_photoz_s_ptr,sigma_b_photoz_s_ptr)\n \n bias_photoz_l_ptr = self.make_double_ptr(lens_z_bias_mean+[0]*5)\n sigma_b_photoz_ptr = self.make_double_ptr(lens_z_bias_sigma+[0.04]*5)\n set_clphotoz_priors_mpp(bias_photoz_l_ptr,sigma_b_photoz_ptr)\n \n def input_cosmolike_par(self,pco):\n '''\n Example inputs:\n pco = [0.295, 0.831, 0.9676, -1., 0.0468, 0.6881]\n ''' \n \n bias_lens = [1.45, 1.55, 1.65, 1.8, 2.0]\n IA = [0., 0.]\n PZbias_lens = [0., 0., 0., 0., 0.]\n PZbias_source = [0., 0., 0., 0.]\n m_shear = [0., 0., 0., 0.]\n \n # Om s8 ns w0 wa Ob Onuh2 h MGSigma MGmu\n set_cosmology_params(pco[0], pco[1], pco[2], pco[3] ,0.0 ,pco[4], 0.0006155, pco[5], 0.0, 0.0)\n \n set_nuisance_gbias(bias_lens[0],bias_lens[1],bias_lens[2],bias_lens[3],bias_lens[4],0.,0.,0.,0.,0.,\n 0., 0., 0., 0., 0.,0.,0.,0.,0.,0.)\n\n set_nuisance_ia_mpp(IA[0],IA[1],0.,0.,0.,0.,0.,0.,0.,0.)\n \n set_nuisance_clustering_photoz(PZbias_lens[0],PZbias_lens[1],PZbias_lens[2],PZbias_lens[3],PZbias_lens[4],0.,0.,0.,0.,0.,)\n \n set_nuisance_shear_photoz(PZbias_source[0],PZbias_source[1],PZbias_source[2],PZbias_source[3],0.,0.,0.,0.,0.,0.,)\n \n set_nuisance_shear_calib(m_shear[0],m_shear[1],m_shear[2],m_shear[3],0.,0.,0.,0.,0.,0.)\n \n \n def gen_xip(self,pco,bias_lens,IA,PZbias_lens,PZbias_source,m_shear):\n # need to use self.input_cosmolike_par(pco) first\n \n Model_xip = [0]*self.Ndata_shear\n Model_xim = [0]*self.Ndata_shear\n \n \n #for j in range(self.Ndata_shear):\n # Model_xip[j]=xi_shear_tomo_sys(1,SC.DFcompute_xip[\"theta_rad\"][j],SC.DFcompute_xip[\"z1_bin\"][j],SC.DFcompute_xip[\"z2_bin\"][j])\n \n #for j in range(self.Ndata_shear):\n # Model_xim[j]=xi_shear_tomo_sys(-1,SC.DFcompute_xim[\"theta_rad\"][j],SC.DFcompute_xim[\"z1_bin\"][j],SC.DFcompute_xim[\"z2_bin\"][j])\n \n #return np.array(Model_xip+Model_xim)\n \n \n ","sub_path":"code/.ipynb_checkpoints/gen_model-checkpoint.py","file_name":"gen_model-checkpoint.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"251972953","text":"from discord.ext import commands\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Miscellaneous(commands.Cog, name='Miscellaneous'):\n \"\"\"\n Management commands, help and configuration.\n \"\"\"\n\n def __init__(self, bot: commands.Bot, *args, **kwargs):\n self.bot = bot\n super().__init__(*args, **kwargs)\n\n @commands.command()\n async def 
shutdown(self, ctx: commands.Context):\n        \"\"\"\n        Shuts down the bot.\n        \"\"\"\n\n        if await self.bot.is_owner(ctx.author):\n            msg = _('shutting down')\n            await ctx.send(f'{msg.capitalize()}...')\n            await self.bot.close()\n        else:\n            msg = _('you don\\'t have permission to perform this command')\n            await ctx.send(f'{msg.capitalize()}.')\n","sub_path":"bot/cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"568751798","text":"\n\n#class header\nclass _DISCO():\n\tdef __init__(self,): \n\t\tself.name = \"DISCO\"\n\t\tself.definitions = [u'an event where people dance to modern recorded music for entertainment, or a place where this often happens: ', u'a type of music, popular in the 1970s, that people dance to in a nightclub ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_disco.py","file_name":"_disco.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"505238676","text":"from maya import OpenMaya\n\nfrom maya.OpenMaya import MVector\n\nimport math\n\n\n\n#----------------------------\n# Pythagorean Theorem\n# opp**2 + adj**2 = hyp**2 \n# x= Adjacent y= Opposite , find = 0 <-- means find hypotenuse\n#----------------------------\n\ndef pythag( x, y, find= 0 ):\n\t''' returns as a float '''\n\t\n\tif not isinstance(x, float) and not isinstance(x, int):\n\t\traise ValueError(\"Please input only float/ integer values for x: such as 1.0\")\n\t\t\n\tif not isinstance(y, float) and not isinstance(y, int):\n\t\traise ValueError(\"Please input only float/ integer values for y: such as 1.0\")\n\n\tif find == 0:\n\t\t''' input is opposite and adjacent '''\n\t\treturn ((x**2)+(y**2))**0.5\n\t\t\n\t\t# Or you can use math.hypot(x, y)\n\n\tif find == 1:\n\t\t''' input is hypotenuse and opposite or adjacent '''\n\t\treturn abs((x**2)-(y**2))**0.5\n\n\n#---------------------------------------------------------------\n# 2D Barycentric Co-ordinates\n# Co-ordinates between start and end point: (start + end)* 0.5\n# Co-ordinates at percentage between start and end point (start * percent) + (end * (1-percent)) --> because percentage is a float between 0.0 and 1.0 and as such, 10% of 1 is 1 - 0.1 = 0.9\n#---------------------------------------------------------------\n\ndef bary_2D( start, end, percent ):\n\t''' \n\treturns a vector \n\tAccepts a vector start and a vector end and a float/integer percentage.\n\t'''\n\n\tif type(percent) not in [float, int]:\n\t\traise ValueError(\"Please input only float/ integer values for percent such as 1.0\")\n\tif percent > 1 or percent < 0:\n\t\traise ValueError(\"Percent must fall between 0 and 1.\")\n\t\n\t\n\t# clamp = lambda n, minn, maxn: max(min(maxn, n), minn)\n\t# perc= clamp( percent, -1, 0)\n\t\n\tvc= lambda x, y, z, i, percent: ((x * (i+ percent)), (y*(i+ percent)), (z*(i+percent)))\n\t\n\tif start and end:\n\t\tvector1= vc(*(start+(0, percent)))\n\t\tvector2= vc(*(end+(1, -percent)))\t# i=1 with -percent scales end by (1 - percent), matching the formula above\n\t\n\tomVector1= MVector(*vector1)\n\t\n\tomVector2= MVector(*vector2)\n\t\n\tomVector3= omVector1 + omVector2\n\t\n\t#return ((start * percent) + (end * (1-percent)))\n\treturn omVector3.x, omVector3.y, 
omVector3.z\n\n\t\n\n#----------------------------------------------------------------\n# Adds two vectors and finds the length of the third vector\n#----------------------------------------------------------------\ndef mag( vector1, vector2 ):\n\t''' \n\treturn interger value \n\tUses Maya MVector class\n\t'''\n\n\tomVector1= MVector(*vector1)\n\t\n\tomVector2= MVector(*vector2)\n\t\n\tomVector3= omVector2 - omVector1\n\t\n\tf= lambda x, y, z: ((x**2)+(y**2)+(z**2))**0.5\n\t\n\treturn f(omVector3.x, omVector3.y, omVector3.z)\n\n\n\t\n#----------------------------------------------------------------\n# Uses a list of two values, adds two vectors and finds the length of the third vector\n#----------------------------------------------------------------\ndef magnitude( *args):\n\t'''\n\tINPUTS:\n\t\targs: magnitude( list(), list() ) or magnitude(list())\n\t\t\n\n\tINFO:\n\t\tsuebtracts the second list vector from the first.\n\t\tsquares each item in list then \n\t\t\n\t\tInputs must be a vector of 3 values in a list!\n\t\t\n\t'''\n\tdata= list()\n\t\n\t# if a list of tail vector values are fed.\n\tif len(args)== 2:\n\t\ttail= args[0]\n\t\tif not type(tail) is list:\n\t\t\traise ValueError(\"Please use a list of 3 floats or intergers\")\n\t\t\t\n\t\thead= args[1]\n\t\tif not type(head) is list:\n\t\t\traise ValueError(\"Please use a list of 3 floats or intergers\")\n\t\n\t\n\t\t\n\t\t# vectors assume only three values in the list!\n\t\t# Using this, we find the vector!\n\t\tfor j in range(3):\n\t\t\tdata.append((tail[j] - head[j])**2)\n\t\t\n\t\tdata= sum(data)\n\n\t\treturn abs(data**0.5)\n\n\t\n\t# if a list of head vector values are fed.\n\telif len(args)== 1:\n\t\thead= args[0]\n\t\tif not type(head) is list:\n\t\t\traise ValueError(\"Please use a list of 3 floats or intergers\")\n\t\t\n\t\t\n\t\t#Calculating magnitude of WorldSpace: sqrt((ax * ax) + (ay * ay) + (az * az)) \n\t\tfor j in range(len(head)):\n\t\t\tdata.append(head[j]**2)\n\t\tdata= sum(data)**0.5\n\t\t\n\t\treturn abs(data)\n\t\t\n\telse:\n\t\tprint(\"Wrong number of arguments used!\\r\\n\\\n\t\t\t\tYou must have either one or two lists of vectors: head, tail or head\")\n\t\n\t\n\t\n#----------------------------------------------------------------\n# Using formula cos? 
#----------------------------------------------------------------\n# Using formula cos(theta) = a.b / |a||b|\t\n#----------------------------------------------------------------\ndef angle(vector1, vector2):\n\t'''\n\tComputes the angle between two vectors, in degrees.\n\t'''\n\t\n\tdata= list()\n\t\n\t# Dot product: sum the products of the vector components\n\tfor j in range(3):\n\t\tdata.append(vector1[j] * vector2[j])\n\t\n\tdata= sum(data)\n\t\n\tdata= data / (magnitude(vector1) * magnitude(vector2))\n\t\n\treturn math.degrees(math.acos(data))\n\t\n#----------------------------------------------------------------\n# Creates a vector class \n#----------------------------------------------------------------\nclass Vector:\n\t'''\n\tINPUT:\n\t\targs: list() of three values, int(), int(), int() or float(), float(), float() types.\n\t\t\t\ttwo lists of three values: vector tail, vector head.\n\t\t\n\tINFO:\n\t\tInputs a vector list, returns a class function of the vector.\n\t\tAccepts tail, head vector lists.\n\t\tVector.magnitude returns a vector magnitude from origin.\n\t\t\n\tUSAGE:\n\t\t# Define vectors\n\t\ta= Vector([1.5, 0.4, 6])\n\t\tb= Vector([6, 2, 1])\n\t\t\n\t\t# Add or subtract vectors\n\t\tc = a + b\n\t\tprint(\"ADD: %s\" % c)\n\t\tc = a - b\n\t\tprint(\"SUB: %s\" % c)\n\t'''\n\tdef __init__(self, *args):\n\t\tdata= list()\n\t\tmag= list()\n\t\t\n\t\t# if a list of tail vector values are fed.\n\t\tif len(args)== 2:\n\t\t\ttail= args[0]\n\t\t\tif not type(tail) is list:\n\t\t\t\traise ValueError(\"Please use a list of 3 floats or integers\")\n\t\t\t\t\n\t\t\thead= args[1]\n\t\t\tif not type(head) is list:\n\t\t\t\traise ValueError(\"Please use a list of 3 floats or integers\")\n\t\t\n\t\t\n\t\t\tself.magnitude= magnitude(tail, head)\n\t\t\n\t\t# if a list of head vector values are fed.\n\t\telif len(args)== 1:\n\t\t\thead= args[0]\n\t\t\tif not type(head) is list:\n\t\t\t\traise ValueError(\"Please use a list of 3 floats or integers\")\n\t\t\t\n\t\t\tself.data = head\n\t\t\t\n\t\t\tself.magnitude = magnitude(head)\n\t\t\t\n\t\t\t#Calculating magnitude of WorldSpace: sqrt((ax * ax) + (ay * ay) + (az * az)) \n\t\t\tfor j in range(len(self.data)):\n\t\t\t\tmag.append(self.data[j]*self.data[j])\n\t\t\tmag= sum(mag)**0.5\n\t\t\tself.magnitude = abs(mag)\n\t\t\t\n\t\telse:\n\t\t\tprint(\"Wrong number of arguments used!\\r\\n\\\n\t\t\t\t\tYou must have either one or two lists of vectors: head, tail or head\")\n\t\t\t\t\t\n\t\t# get the vector data by subtracting from the head by the tail.\n\t\tif \"tail\" in dir():\n\t\t\tfor v in range(len(tail)):\n\t\t\t\tdata.append(head[v] - tail[v])\n\t\t\t\n\t\t\tself.data = data\n\t\t\t\n\t\t\n\tdef __str__(self):\n\t\treturn repr(self.data)\n\t\n\tdef __repr__(self):\n\t\t'''\n\t\tWhen using __repr__, note that what is returned represents the instance.\n\t\tYou can get the name of the instance by loc1.__class__.__name__\n\t\tBut you still need to get the object.data\n\t\t'''\n\t\treturn \"%s\"%self.data\n\n\tdef __add__(self, other):\n\t\tdata = list()\n\t\t\t\n\t\tfor j in range(len(self.data)):\n\t\t\tdata.append(self.data[j] + other.data[j])\n\n\t\treturn Vector(data)\n\n\tdef __sub__(self, other):\n\t\tdata = list()\n\n\t\tfor j in range(len(self.data)):\n\t\t\tdata.append(self.data[j] - other.data[j])\n\n\t\treturn Vector(data)\n\t\t\n\tdef __mul__(self, other):\n\t\tdata= list()\n\t\t\n\t\t\n\t\tif type(other) is float or type(other) is int:\n\t\t\tfor j in range(len(self.data)):\n\t\t\t\tdata.append(self.data[j] * other)\n\t\t\t\t\n\t\telif type(other) is list or type(other) is tuple:\n\t\t\tfor j in range(len(self.data)):\n
\t\t\t\tdata.append(self.data[j] * other[j])\t\n\t\t\n\t\telse:\n\t\t\ttry:\n\t\t\t\tfor j in range(len(self.data)):\n\t\t\t\t\tdata.append(self.data[j] * other.data[j])\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t\t\n\t\treturn Vector(data)\n\t\t\n\tdef __pow__(self, other):\n\t\tdata= list()\n\t\t\n\t\n\t\tif type(other) is float or type(other) is int:\n\t\t\tfor j in range(len(self.data)):\n\t\t\t\tdata.append(self.data[j] ** other)\n\t\t\t\t\n\t\telif type(other) is list or type(other) is tuple:\n\t\t\tfor j in range(len(self.data)):\n\t\t\t\tdata.append(self.data[j] ** other[j])\t\n\n\t\telse:\n\t\t\ttry:\n\t\t\t\tfor j in range(len(self.data)):\n\t\t\t\t\tdata.append(self.data[j] ** other.data[j])\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\t\t\n\t\treturn Vector(data)","sub_path":"rigging_tools/utils/math_utils.py","file_name":"math_utils.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"203761177","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nfrom itertools import product\n\n# The file to read\nin_file = 'Chinup_data_collection_plan_forScratch_v5_Interval15mm_new2.xlsx'\nout_file = 'OUI_script_v5_Scratch_Interval15mm_new2.txt'\n\n#all_offset = {'f':(1.1, 2.2, 3.3), \n#              'r':(4, 5, 6), \n#              'l':(7.7, 8.8, 9.9), \n#              'b':(10, 11, 12), \n#              't':(13.3, 14.4, 15.5)}\n\nxls = pd.ExcelFile(in_file)\n\ndf = pd.read_excel(in_file, \n                   skiprows=0, sheet_name=None, header=0, dtype=str)\n\ndata_sheet_names = list(filter(lambda x: x!='CCD1_code_table' and\n                                         x!='CCD2_code_table', \n                               xls.sheet_names))\n\ndata_sheets = [df.get(sheet) for sheet in data_sheet_names]\n\ndf_total = pd.concat(data_sheets, ignore_index=True, sort=False)\n\ndf_ccd1 = pd.read_excel(in_file, skiprows=0,\n                        sheet_name='CCD1_code_table', header=0, dtype=str)\n\ndf_ccd2 = pd.read_excel(in_file, skiprows=0,\n                        sheet_name='CCD2_code_table', header=0, dtype=str)\n\ndf_total.to_csv('result_df.csv', index = False)\n\n\nnum_index = len(df_total['Index'])\nprint('num_index: ', num_index)\n\nlines = []\n\ndef get_Pnumber(ccd_df, expos, gain, illu):\n    expos_condition = ccd_df.Exposure==expos\n    df_tmp = ccd_df[expos_condition]\n\n    gain_condition = df_tmp.Gain==gain\n    df_tmp = df_tmp[gain_condition]\n\n    illu_condition = df_tmp.Illumination==illu\n    df_tmp = df_tmp[illu_condition]\n\n    P_number = df_tmp.iloc[0]['Code']\n\n    return P_number\n\ndef write_step(lines, step, acttype, act, f_coord, t_coord,\n               Y1, theta, p_number, filename):\n    lines.append('[Step{}]\\n'.format(step))\n    lines.append('ActionType={}\\n'.format(acttype))\n    lines.append('action={}\\n'.format(act))\n    lines.append('F_X={}\\n'.format(f_coord[0]))\n    lines.append('F_Y={}\\n'.format(f_coord[1]))\n    lines.append('F_Z={}\\n'.format(f_coord[2]))\n    lines.append('T_X={}\\n'.format(t_coord[0]))\n    lines.append('T_Y={}\\n'.format(t_coord[1]))\n    lines.append('T_Z={}\\n'.format(t_coord[2]))\n#    lines.append('Offset=offset{}\\n'.format(offset))\n    lines.append('Y1={}\\n'.format(Y1))\n    lines.append('Theta={}\\n'.format(theta))\n#    lines.append('Exposure={}\\n'.format(expos))\n#    lines.append('Gain={}\\n'.format(gain))\n#    lines.append('WhiteBalance={}\\n'.format(whitebalance))\n    lines.append('P_Number={}\\n'.format(p_number))\n    lines.append('Filename={}\\n'.format(filename))\n    lines.append('\\n')\n\nstep = 1\nmv_step = 0\nshot_step = 0\nrotate_step = 0\nold_theta = '0'\nfor i, Idx in enumerate(df_total['Index']):\n    Xs = list(map(lambda x: x.strip(), df_total.iloc[i]['X'].split(',')
))\n Ys = list(map(lambda y: y.strip(), df_total.iloc[i]['Y'].split(',') ))\n Zs = list(map(lambda z: z.strip(), df_total.iloc[i]['Z'].split(',') ))\n Exposes = df_total.iloc[i]['Exposure'].replace(' ', '').split(',')\n Side = df_total.iloc[i]['Side'].replace(' ', '') \n Y1 = df_total.iloc[i]['Y1'].replace(' ', '')\n Theta = df_total.iloc[i]['Theta'].replace(' ', '')\n Gain = df_total.iloc[i]['Gain'].replace(' ', '')\n# WhiteBalance = df_total.iloc[i]['WhiteBalance'].replace(' ', '')\n Illu = df_total.iloc[i]['Illumination'].replace(' ', '')\n\n # Rotate\n if Theta != old_theta:\n acttype = 1\n act = 8\n P_number = 'None'\n Filename = 'None'\n \n write_step(lines, step, acttype, act, (0, 0, 0), (0, 0, 0), \n Y1, Theta, P_number, Filename)\n\n step += 1\n rotate_step += 1\n\n coords = [Xs, Ys, Zs]\n coords.sort(key=len, reverse=True)\n\n Xs_idx = coords.index(Xs)\n Ys_idx = coords.index(Ys)\n Zs_idx = coords.index(Zs)\n\n mv_idx = 0\n for outer, mid in product(coords[2], coords[1]):\n for inner in coords[0]:\n loop_list = [inner, mid, outer]\n\n X = loop_list[Xs_idx]\n Y = loop_list[Ys_idx]\n Z = loop_list[Zs_idx]\n\n # Translate\n acttype = 1\n act = 2 if Side == 't' else 1\n f_coord = (0, 0, 0) if Side == 't' else (X, Y, Z)\n t_coord = (X, Y, Z) if Side == 't' else (0, 0, 0)\n P_number = 'None'\n Filename = 'None'\n\n write_step(lines, step, acttype, act, f_coord, t_coord,\n Y1, Theta, P_number, Filename)\n\n step += 1\n mv_step += 1\n mv_idx += 1\n\n # Shot\n acttype = 3 if Side == 't' else 2\n act = 32 if Side == 't' else 16\n ccd_df = df_ccd2 if Side == 't' else df_ccd1\n f_coord = (0, 0, 0) if Side == 't' else (X, Y, Z)\n t_coord = (X, Y, Z) if Side == 't' else (0, 0, 0)\n\n if '.' in X:\n X_name = X.split('.')[0] + 'd' + X.split('.')[1]\n else:\n X_name = X\n\n if '.' in Y:\n Y_name = Y.split('.')[0] + 'd' + Y.split('.')[1]\n else:\n Y_name = Y\n\n if '.' 
in Z:\n Z_name = Z.split('.')[0] + 'd' + Z.split('.')[1]\n else:\n# Z_name = Z + 'd'\n Z_name = Z\n\n Filename = '{}side_{}_{:04d}_{}X_{}Y_{}Z'.format(\n Side.upper(), Idx, mv_idx, X_name, Y_name, Z_name)\n\n P_numbers = ''\n for Expos in Exposes:\n P_number = get_Pnumber(ccd_df, Expos, Gain, Illu)\n P_numbers += P_number + ','\n \n P_numbers = P_numbers.rstrip(',') \n \n write_step(lines, step, acttype, act, f_coord, t_coord,\n Y1, Theta, P_numbers, Filename)\n\n step += 1\n shot_step += 1\n \n coords[0] = coords[0][::-1]\n\n old_theta = Theta\n\n# write to file\nlines.insert(0, '[StepData]\\n')\nlines.insert(1, 'AllStep={}\\n'.format(step-1))\n#idx = 2\n#for ofs in ('f', 'r', 'l', 'b', 't'):\n# string = 'offset' + ofs\n# lines.insert(idx, '{}={},{},{}\\n'.format(string,\n# all_offset[ofs][0],\n# all_offset[ofs][1],\n# all_offset[ofs][2]))\n# idx += 1\n\nlines.insert(2, '\\n')\nlines.pop()\n\nwith open(out_file, 'w') as f:\n for line in lines:\n f.write(line)\n\nprint()\nprint('{:>17s}{}'.format('total steps: ', step-1))\nprint('{:>17s}{}'.format('translate steps: ', mv_step))\nprint('{:>17s}{}'.format('rotate steps: ', rotate_step))\nprint('{:>17s}{}'.format('shot steps: ', shot_step))\nprint()\nprint('input file: ', in_file)\nprint('output file:', out_file)\n\n","sub_path":"ITRI_ADAT/Chinup_script/Excel2OUI_v5.py","file_name":"Excel2OUI_v5.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"2325733","text":"\"\"\"empty message\n\nRevision ID: 595333e172df\nRevises: 34b247c572fb\nCreate Date: 2019-05-06 14:22:32.899160\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '595333e172df'\ndown_revision = '34b247c572fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('ssh_key',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('private', sa.String(length=128), nullable=True),\n sa.Column('public', sa.String(length=128), nullable=True),\n sa.Column('type', sa.String(length=24), nullable=True),\n sa.Column('note', sa.String(length=256), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.add_column('container', sa.Column('ssh_key_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'container', 'ssh_key', ['ssh_key_id'], ['id'])\n op.add_column('task_queue', sa.Column('completed', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('task_queue', 'completed')\n    op.drop_constraint(None, 'container', type_='foreignkey')\n    op.drop_column('container', 'ssh_key_id')\n    op.drop_table('ssh_key')\n    # ### end Alembic commands ###\n","sub_path":"portal/migrations/versions/595333e172df_.py","file_name":"595333e172df_.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"211751758","text":"import numpy as np\nimport cv2\nimport glob\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n    # Convert to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # Take the derivative in x or y given orient = 'x' or 'y'\n    if orient=='y':\n        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n    else:\n        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n    # Take the absolute value of the derivative or gradient\n    abs_sobel = np.absolute(sobel)\n    # Scale to 8-bit (0 - 255) then convert to type = np.uint8\n    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n    # Create a mask of 1's where the scaled gradient magnitude \n    # is > thresh_min and < thresh_max\n    sxbinary = np.zeros_like(scaled_sobel)\n    sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n    # Return this mask as a binary image \n    return sxbinary\ndef mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):\n    # Convert to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # Take the gradient in x and y separately\n    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n    # Calculate the magnitude\n    gradmag = np.sqrt(sobelx**2 + sobely**2)\n    # Scale to 8-bit (0 - 255) and convert to type = np.uint8\n    scale_factor = np.max(gradmag)/255 \n    gradmag = (gradmag/scale_factor).astype(np.uint8)\n    # Create a binary mask where mag thresholds are met \n    sxybinary = np.zeros_like(gradmag)\n    sxybinary[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1\n    # Return a binary image of threshold result \n    return sxybinary\ndef dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):\n    # Convert to grayscale\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    # Take the gradient in x and y separately\n    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1,0, ksize=sobel_kernel)\n    sobely = cv2.Sobel(gray, cv2.CV_64F, 0,1, ksize=sobel_kernel)\n    # Take the absolute value of the x and y gradients\n    abs_sobelx = np.absolute(sobelx)\n    abs_sobely = np.absolute(sobely)\n    # Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient\n    abs_grad_direction = np.arctan2(abs_sobely, abs_sobelx)\n    # Create a binary mask where direction thresholds are met\n    direction_binary = np.zeros_like(abs_grad_direction)\n    direction_binary[(abs_grad_direction >= thresh[0]) & (abs_grad_direction <= thresh[1])]=1\n    # Return a binary image of threshold result \n    return direction_binary\ndef hls_select(img, channel='S', thresh=(0, 255)):\n    # Convert to HLS color space\n    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n    if channel=='H':\n        channel = hls[:,:,0]\n    elif channel=='L':\n        channel = hls[:,:,1]\n    elif channel=='S':\n        channel = hls[:,:,2]\n    # Apply a threshold to the S channel\n    hls_binary = np.zeros_like(channel)\n    hls_binary[(channel > thresh[0]) & (channel <= thresh[1])] = 1\n    # Return a binary image of threshold result\n    return hls_binary
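\n\n# Minimal combined-threshold sketch (illustrative; 'test.jpg' is a placeholder path):\n# img = cv2.cvtColor(cv2.imread('test.jpg'), cv2.COLOR_BGR2RGB)\n# gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(20, 100))\n# s_binary = hls_select(img, channel='S', thresh=(170, 255))\n# combined = np.zeros_like(gradx)\n# combined[(gradx == 1) | (s_binary == 1)] = 1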
","sub_path":"thresholds.py","file_name":"thresholds.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"196075289","text":"# draws all arrays from /tests/in\nimport os\nimport matplotlib.pyplot as plt\n\ndef draw(file):\n\tx = []\n\ty = []\n\twith open(file, 'r') as _in:\n\t\tfor line in _in:\n\t\t\tx.append(float(line.split(';')[0]))\n\t\t\ty.append(float(line.split(';')[1]))\n\t\tplt.scatter(x, y)\n\t\tplt.plot(x, y)\n\t\tplt.show()\n\n\ndir_name = 'in_2/'\nfor _, _, files in os.walk(dir_name):\n    i = 0\n    for file in files:\n        i += 1\n        plt.figure(i)\n        draw(dir_name + file)\n","sub_path":"tests/draw_all.py","file_name":"draw_all.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"150398043","text":"days = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday']\nimport project2_util\nwday,hour,minute = project2_util.get_time()\nprint('Today is %s, current time %d:%d'%(days[wday-1],hour,minute))\ninterval = int(input('Please enter a time interval (minutes)'))\ntime = interval + hour*60 + minute\nt1 = time // 1440\nt2 = wday + t1 % 7\nif(t2 > 7):\n    t2 = t2 - 7 \nprint('%d minutes from now is %d day(s) after today, and that day is %s'%(interval,t1,days[t2-1]))\n\n","sub_path":"project2/project/17300750039.py","file_name":"17300750039.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"126407681","text":"from qm.QuantumMachinesManager import QuantumMachinesManager\nfrom qm.qua import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom configuration import *\nfrom qm import SimulationConfig\n\n################################\n# Open quantum machine manager #\n################################\n\nqmm = QuantumMachinesManager()\n\n########################\n# Open quantum machine #\n########################\n\nqm = qmm.open_qm(config)\n\n###################\n# The QUA program #\n###################\n\n# Declare Python variables\n###########################\n\nt_min = 4  # in clock cycles units\nt_max = 250  # in clock cycles units\ndt = 10  # in clock cycles units\nt_vec = np.arange(t_min, t_max + dt / 2, dt)  # +dt/2 to include t_max in array\nn_avg = 1e6\n\nwith program() as time_rabi:\n\n    # Declare QUA variables\n    ###################\n    times = declare(int, size=100)  # 'size' defines the max number of photons to be counted\n    counts = declare(int)  # variable to save the total number of photons\n    counts_st = declare_stream()  # stream for 'counts'\n    t = declare(int)  # variable to sweep over in time\n    n = declare(int)  # variable to for_loop\n    n_st = declare_stream()  # stream to save iterations\n\n    # Pulse sequence\n    ################\n    with for_(n, 0, n < n_avg, n + 1):\n\n        with for_(t, t_min, t <= t_max, t + dt):\n            play(\"charge_init\", \"laser_705nm\")  # charge initialization\n            play(\"spin_init\", \"laser_E12\")  # spin initialization\n            align(\"qubit\", \"laser_E12\", \"laser_705nm\")\n            play(\"gauss\", \"qubit\", duration=t)  # gaussian pulse of varied duration\n            align(\"qubit\", \"laser_EX\", \"SNSPD\")\n            play(\"EX\", \"laser_EX\")  # Spin readout to m = 0 transition\n            measure(\n                \"photon_count\",\n                \"SNSPD\",\n                None,\n                time_tagging.analog(times, meas_len, counts),\n            )  # photon count\n            save(counts, counts_st)  # save counts\n\n        save(n, n_st)  # save number of iteration inside for_loop\n\n    # Stream processing\n    ###################\n    with stream_processing():\n
        counts_st.buffer(len(t_vec)).average().save(\"counts\")\n        n_st.save(\"iteration\")\n\n#######################\n# Simulate or execute #\n#######################\n\nsimulate = True\n\nif simulate:\n    qmm.simulate(config, time_rabi, SimulationConfig(10000)).get_simulated_samples().con1.plot()\nelse:\n    job = qm.execute(time_rabi)  # execute QUA program\n\n    res_handle = job.result_handles  # get access to handles\n    vec_handle = res_handle.get(\"counts\")\n    vec_handle.wait_for_values(1)\n    iteration_handle = res_handle.get(\"iteration\")\n    iteration_handle.wait_for_values(1)\n\n    while vec_handle.is_processing():\n        try:\n            counts = vec_handle.fetch_all()\n            iteration = iteration_handle.fetch_all() + 1\n\n        except Exception as e:\n            pass\n\n        else:\n            plt.plot(4 * t_vec, counts / 1000 / (meas_len * 1e-9) / iteration)  # kcps\n            plt.xlabel(\"t [ns]\")\n            plt.ylabel(\"counts [kcps]\")\n            plt.title(\"Time Rabi\")\n            plt.pause(0.1)\n            plt.clf()\n\n    counts = vec_handle.fetch_all()\n    plt.plot(4 * t_vec, counts / 1000 / (meas_len * 1e-9) / n_avg)  # kcps\n    plt.xlabel(\"t [ns]\")\n    plt.ylabel(\"counts [kcps]\")\n    plt.title(\"Time Rabi\")\n    plt.savefig(\"time_rabi.png\")\n","sub_path":"examples-old/Quantum Dots/Cryogenic Single NV center W spin-to-charge conversion/time_rabi.py","file_name":"time_rabi.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"177717813","text":"import cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pylab as plt1\r\nfrom scipy import ndimage\r\nfrom sklearn.cluster import KMeans\r\nimport sys\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\n\r\nLMLO = 712\r\nCC = 704\r\nLB = 151\r\n\r\ndimesional_array = np.load('3D_array_test_max_density.npy')\r\nwalkers = 200\r\nsteps = []\r\nwalker_array = np.zeros((LMLO,700,CC))\r\n\r\nt = np.amax(dimesional_array)\r\n\r\ndef prob_value(array, ii, jj, kk):\r\n    lum = array[kk,ii,jj]\r\n    p = int(round(100*lum/255))\r\n    return p\r\ndef lum_value(array, ii, jj, kk):\r\n    lum = array[kk,ii,jj]\r\n    return lum\r\n\r\ndef ran_start(array):\r\n    for ll in range(0,100):\r\n        val = np.random.randint(-100,high = 500,size = 3)\r\n        if array[val[0],val[1],val[2]] >= LB:\r\n            return val\r\nu = ran_start(dimesional_array)\r\nx0 = 310\r\ny0 = 350\r\nz0 = 380\r\nprint(u)\r\n\"\"\"\r\nfor kk in range(0, LMLO):\r\n    for jj in range(0, CC):\r\n        for ii in range(0, 500):\r\n            if dimesional_array[kk,ii,jj] == t:\r\n                x0 = int(ii)\r\n                y0 = int(jj)\r\n                z0 = int(kk)\r\n\r\n\r\n\r\nes_steps=30000\r\n\r\n\r\nfor ww in range(0, walkers):\r\n    xlist = [x0]\r\n    ylist = [y0]\r\n    zlist = [z0]\r\n    for rr in range(0, es_steps):\r\n        s = np.random.randint(-1,high = 2,size = 1)\r\n        pos = np.random.randint(-1,high = 3,size = 1)\r\n        if pos[0] == 0: \r\n            xlist.append(s[0])\r\n        elif pos[0] == 1:\r\n            ylist.append(s[0])\r\n        elif pos[0] == 2:\r\n            zlist.append(s[0])\r\n        ii = int(np.sum(xlist))\r\n        jj = int(np.sum(ylist))\r\n        kk = int(np.sum(zlist))\r\n        if dimesional_array[kk,ii,jj] <=1:\r\n            steps.append(rr)\r\n            break\r\naverage_steps = round(sum(steps)/0.25*len(steps))\r\n\"\"\"\r\naverage_steps = 10000\r\nbound = 3000\r\nfor ww in range(0, walkers):\r\n    print(ww)\r\n    xlist = [x0]\r\n    ylist = [y0]\r\n    zlist = [z0]\r\n    walker_array[zlist[0],xlist[0],ylist[0]] = 1\r\n    for rr in range(0, average_steps):\r\n        a = 1\r\n        while a > 0:\r\n            x_val = np.random.randint(-1,high = 2,size = 1)\r\n            y_val = np.random.randint(-1,high = 2,size = 1)\r\n
            z_val = np.random.randint(-1,high = 2,size = 1)\r\n            ii = xlist[rr] + x_val[0]\r\n            jj = ylist[rr] + y_val[0]\r\n            kk = zlist[rr] + z_val[0]\r\n            prob_val = prob_value(dimesional_array, ii, jj, kk)\r\n            lum = lum_value(dimesional_array, ii, jj, kk)\r\n            lower_bound = round(rr/bound, 2)\r\n            s = np.random.randint(-1,high = 101,size = 1)\r\n            if lower_bound <= 1.00:\r\n                if s - prob_val <= 0 and lum >=LB:\r\n                    xlist.append(ii)\r\n                    ylist.append(jj)\r\n                    zlist.append(kk)\r\n                    walker_array[kk,ii,jj] += 1\r\n                    a = 0\r\n            else: \r\n                if s - prob_val <= 0:\r\n                    xlist.append(ii)\r\n                    ylist.append(jj)\r\n                    zlist.append(kk)\r\n                    walker_array[kk,ii,jj] += 1\r\n                    a = 0\r\n    \r\n    \r\n\r\nnp.save('2020_02_21_SC_LB151_W200_ST10K_B3K', walker_array)\r\n\r\n\r\n","sub_path":"Random_walkers/Prob_walkers_with restrictions.py","file_name":"Prob_walkers_with restrictions.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"509970016","text":"\n\n# class header\nclass _KAZOO():\n\tdef __init__(self,): \n\t\tself.name = \"KAZOO\"\n\t\tself.definitions = [u'a small musical instrument consisting of a plastic or metal tube with a small piece of paper inside which shakes when the player hums (= sings with closed mouth) into it, making a high sound']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_kazoo.py","file_name":"_kazoo.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"608734429","text":"#!/usr/bin/python2\n# TestGfx.py - Test Module for Gfx.py\n\nfrom __future__ import print_function  # required: this file uses print(..., file=...) under Python 2\n\nimport random\n\nimport Gfx, wxGfx\n#from wxPython.wx import *\nimport wx\n\ndef rnd():\n    return random.random()-0.5\n\n\nclass Landscape:\n    def __init__(self):\n        self.setup()\n\n    def setup(self):\n        self.iter = 1\n        self.scale = 0.9\n        self.amp = 1.0\n        self.net = [[self.amp*rnd(), self.amp*rnd()], [self.amp*rnd()]]\n        self.amp /= 2.0\n\n        self.lastColor = (-1.0, -1.0, -1.0)\n        \n\n    def nextIteration(self):\n        net = []\n        for line in range(len(self.net)-1):\n            net.extend([[],[]])\n            for column in range(len(self.net[line])-1):\n                net[-2].append(self.net[line][column])\n                net[-2].append((self.net[line][column] +\n                                self.net[line][column+1]) / 2.0 +\n                               rnd() * self.amp)\n                net[-1].append((self.net[line][column] +\n                                self.net[line+1][column]) / 2.0 +\n                               rnd() * self.amp)\n                net[-1].append((self.net[line][column+1] +\n                                self.net[line+1][column]) / 2.0 +\n                               rnd() * self.amp)\n            net[-2].append(self.net[line][-1])\n        net.append(self.net[-1])\n        self.amp /= 2.0\n        self.net = net\n        self.iter += 1\n\n\n    def paintWireframe(self, gfx):\n        \"\"\"Redraw the current stage of the Landscape as wireframe.\n        \"\"\"\n\n        def color(z1, z2):\n            if z1 <= 0.0 and z2 <= 0.0: return (0.0, 0.5, 1.0)\n            elif z1 >= 0.4 and z2 >= 0.4: return (0.6, 0.6, 0.6)\n            else: return (0.1, 1.0, 0.1)\n\n        def drawLine(x1, y1, x2, y2, c):\n            if self.lastColor != c:\n                gfx.setColor(c)\n                self.lastColor = c\n            gfx.drawLine(int(x1), int(y1), int(x2), int(y2))\n        \n\n        w,h = gfx.getSize()\n        dx = w/6.0; dy = h/6.0 \n        w *= 2.0/3.0; h *= 1.0/2.0\n        gfx.clear()\n\n        delta = 1.0 / (len(self.net)-1)\n        y = 0.0\n        for line in range(len(self.net)-1):\n            x = line * delta / 2.0\n            for column in range(len(self.net[line])-1):\n                z1 = self.net[line][column]\n                z2 = self.net[line][column+1]\n                z3 = self.net[line+1][column]\n                if z1 < 0.0: z1 =
0.0\n if z2 < 0.0: z2 = 0.0\n if z3 < 0.0: z3 = 0.0\n x1,y1 = x, y+z1*self.scale\n x2,y2 = x+delta, y+z2*self.scale\n x3,y3 = x+delta/2.0, y+delta+z3*self.scale\n c1 = color(z1, z2)\n c2 = color(z1, z3)\n c3 = color(z2, z3)\n drawLine(x1*w+dx,y1*h+dy, x2*w+dx,y2*h+dy, c1)\n drawLine(x1*w+dx,y1*h+dy, x3*w+dx,y3*h+dy, c2)\n drawLine(x2*w+dx,y2*h+dy, x3*w+dx,y3*h+dy, c3)\n x += delta\n y += delta\n\n\n def paintSolid(self, gfx):\n \"\"\"Redraw the current landscape.\n \"\"\"\n\n def color(edges, z1, z2, z3):\n # much too simple algorithm\n if z1 <= 0.0 and z2 <= 0.0 and z3 <= 0.0: baseC = (0.0, 0.5, 1.0)\n elif z1 >= 0.4 and z2 >= 0.4 and z3 >= 0.4: baseC = (0.6, 0.6, 0.6)\n else: baseC = (0.1, 1.0, 0.1)\n dv = 1.0+10.0*max(abs(z1-z2), abs(z2-z3))\n return (baseC[0]/dv, baseC[1]/dv, baseC[2]/dv)\n\n w,h = gfx.getSize()\n dx = w/6.0; dy = h/3.0 \n w *= 2.0/3.0; h *= 1.0/2.0\n gfx.clear()\n\n delta = 1.0 / (len(self.net)-1)\n y = 0.0\n for line in range(len(self.net)-1):\n x = line * delta / 2.0\n for column in range(len(self.net[line])-1):\n z1 = self.net[line][column]\n z2 = self.net[line][column+1]\n z3 = self.net[line+1][column]\n if z1 < 0.0: z1 = 0.0\n if z2 < 0.0: z2 = 0.0\n if z3 < 0.0: z3 = 0.0\n x1,y1 = x, y-z1*self.scale\n x2,y2 = x+delta, y-z2*self.scale\n x3,y3 = x+delta/2.0, y+delta-z3*self.scale\n edges = [(x1*w+dx, y1*h+dy), (x2*w+dx, y2*h+dy),\n (x3*w+dx, y3*h+dy)]\n gfx.setFillColor(color(edges, z1, z2, z3)) \n gfx.fillPoly(edges)\n try:\n z4 = self.net[line+1][column+1]\n if z4 < 0.0: z4 = 0\n x4, y4 = x+delta*3.0/2.0, y+delta-z4*self.scale\n edges[0] = (x4*w+dx, y4*h+dy)\n gfx.setFillColor(color(edges, z4, z2, z3))\n gfx.fillPoly(edges)\n except IndexError: pass\n x += delta\n y += delta \n \n\n\nclass TestApp(wx.App):\n def __init__(self):\n wx.App.__init__(self, 0)\n self.iter = 0\n\n def OnInit(self):\n self.landscape = Landscape()\n self.win = wx.Frame(None, -1, \"Test - GfxDriver\")\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.paintArea = wx.Window(self.win, -1, size=(640, 400))\n self.paintArea.SetSizeHints(100, 100, 1024, 768)\n self.sizer.Add(self.paintArea, 1, wx.EXPAND)\n self.button = wx.Button(self.win, 100, \"Next Step\",\n size=wx.Size(100, 30))\n self.sizer.Add(self.button, 0, wx.ALIGN_CENTER_HORIZONTAL)\n self.win.SetSizer(self.sizer)\n self.sizer.SetSizeHints(self.win)\n\n self.refresh = True\n wx.EVT_BUTTON(self.button, -1, self.OnButton)\n wx.EVT_PAINT(self.paintArea, self.OnPaint)\n wx.EVT_IDLE(self.win, self.OnIdle)\n\n self.win.SetSize(wx.Size(640, 400))\n self.win.Show(1)\n self.SetTopWindow(self.win)\n self.gfx = wxGfx.Driver(None)\n return 1\n\n def OnButton(self, event):\n self.iter += 1\n if self.iter > 6:\n self.landscape.setup()\n self.iter = 0\n else:\n self.landscape.nextIteration()\n self.refresh = True\n\n def OnIdle(self, event):\n if self.refresh:\n self.DC = wx.ClientDC(self.paintArea)\n self.DC.BeginDrawing()\n self.gfx.changeDC(self.DC)\n self.landscape.paintWireframe(self.gfx)\n # self.landscape.paintSolid(self.gfx)\n self.gfx.changeDC(None)\n self.DC.EndDrawing()\n self.refresh = False\n\n def OnPaint(self, event):\n self.refresh = True\n event.Skip()\n\n def run(self):\n self.win.Refresh()\n self.MainLoop()\n\n\nif __name__ == \"__main__\":\n app = TestApp()\n app.run()\n","sub_path":"PyPlotter/Test_Gfx.py","file_name":"Test_Gfx.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"223079834","text":"import os\nimport sys\n\nimport pandas 
as pd\nimport pytest\n\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom tasks.customer_info_1 import CustomerInfo\nfrom tests.test_initial import transactions, ipts\n\n\ndef test_customer_ipt_exact_match(transactions, ipts):\n    cust_ids = transactions.index.unique()\n    for cust_id in cust_ids:\n        cust_transactions = transactions.loc[cust_id]\n        cust_info = CustomerInfo(cust_transactions)\n        assert cust_info.ipt == ipts.loc[cust_id].ipt, \"IPT doesn't match\"\n\n\ndef test_customer_ipt_almost_match(transactions, ipts):\n    cust_ids = transactions.index.unique()\n    for cust_id in cust_ids:\n        cust_transactions = transactions.loc[cust_id]\n        cust_info = CustomerInfo(cust_transactions)\n        assert abs(\n            (cust_info.ipt - ipts.loc[cust_id].ipt) /\n            ipts.loc[cust_id].ipt\n        ) < .15, \"The difference between the IPTs is > 15%\"","sub_path":"tests/test_customer_info.py","file_name":"test_customer_info.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"488468207","text":"import argparse\nfrom collections import defaultdict\nfrom tree import Tree\nimport heapq\n\n\"\"\"\nTakes a .trees file and prints out the 20 most\nfrequently used rules.\n\"\"\"\n\nrules_used = defaultdict(int)\n\n\ndef fill_dict(filename):\n    with open(filename) as tfile:\n        for line in tfile:\n            tree = Tree.from_str(line)\n            count_frequencies(tree.root)\n\n\ndef count_frequencies(node):\n    if node is None or len(node.children) == 0:\n        return\n    c = node.children\n    if len(node.children) == 1:\n        rules_used[node.__str__(), c[0].__str__()] += 1\n    else:\n        rules_used[node.__str__(), c[0].__str__(), c[1].__str__()] += 1\n    for child in node.children:\n        count_frequencies(child)\n\n\ndef print_most_freq():\n    i = 1\n    for rule in heapq.nlargest(20, rules_used, key=rules_used.get):\n        if len(rule) == 2:\n            string = '{} -> {}'.format(rule[0], rule[1])\n        else:\n            string = '{} -> {} {}'.format(rule[0], rule[1], rule[2])\n        print('{}. 
{}: {}'.format(i, string, rules_used[rule]))\n i += 1\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('tree_file')\n args = parser.parse_args()\n fill_dict(args.tree_file)\n print_most_freq()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"rules_counter.py","file_name":"rules_counter.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123163478","text":"import numpy as np \r\nfrom scipy.interpolate import interp1d\r\nfrom dmx import dmx\r\nimport time\r\n\r\ndef interpolate(r, g, b):\r\n from config import config\r\n low = config.load(\"positions\", \"low\", [[0,0], [0,0], [0,0]])\r\n center = config.load(\"positions\", \"center\", [[0,0], [0,0], [0,0]])\r\n high = config.load(\"positions\", \"high\", [[0,0], [0,0], [0,0]])\r\n\r\n xr = [0, r, 255] # low, center, high\r\n xg = [0, g, 255] # low, center, high\r\n xb = [0, b, 255] # low, center, high\r\n p = [ low, center, high ]\r\n print(p)\r\n rr = [p[0][0][0], p[1][0][0], p[2][0][0]]\r\n rt = [p[0][0][1], p[1][0][1], p[2][0][1]]\r\n gr = [p[0][1][0], p[1][1][0], p[2][1][0]]\r\n gt = [p[0][1][1], p[1][1][1], p[2][1][1]]\r\n br = [p[0][2][0], p[1][2][0], p[2][2][0]]\r\n bt = [p[0][2][1], p[1][2][1], p[2][2][1]]\r\n\r\n x_new = np.linspace(0, 255, 256)\r\n\r\n frr = interp1d(xr, rr, kind='linear')\r\n frt = interp1d(xr, rt, kind='linear')\r\n fgr = interp1d(xg, gr, kind='linear')\r\n fgt = interp1d(xg, gt, kind='linear')\r\n fbr = interp1d(xb, br, kind='linear')\r\n fbt = interp1d(xb, bt, kind='linear')\r\n\r\n yr = [[int(round(r)),int(round(t))] for r,t in np.stack((frr(x_new), frt(x_new)), axis=-1)]\r\n yg = [[int(round(r)),int(round(t))] for r,t in np.stack((fgr(x_new), fgt(x_new)), axis=-1)]\r\n yb = [[int(round(r)),int(round(t))] for r,t in np.stack((fbr(x_new), fbt(x_new)), axis=-1)]\r\n\r\n return [[yr[i], yg[i], yb[i]] for i in range(0,256)]\r\n \r\nif __name__ == \"__main__\":\r\n dmx.send_rgb(255,255,255)\r\n from colors import COLORS\r\n c = COLORS[32][1]\r\n print(COLORS[32])\r\n ip = interpolate(c.r, c.g, c.b)\r\n print(ip[c.r][0], ip[c.g][1], ip[c.b][2])\r\n dmx.send_rt(ip[0][0], ip[0][1], ip[0][2])\r\n time.sleep( 1 )\r\n dmx.send_rt(ip[c.r][0], ip[c.g][1], ip[c.b][2])\r\n time.sleep( 1 )\r\n dmx.send_rt(ip[255][0], ip[255][1], ip[255][2])\r\n time.sleep( 1 )\r\n \r\n for i in range(0,255):\r\n dmx.send_rt(*ip[i])\r\n time.sleep( 0.03 )\r\n \r\n","sub_path":"interpolate.py","file_name":"interpolate.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"622900637","text":"import torch\nfrom torch import nn\nfrom transformers import BertPreTrainedModel, BertModel\n\n\nclass JointBERT(BertPreTrainedModel):\n def __init__(self, config, device, slot_dim, intent_dim, intent_weight=None, context=False):\n super(JointBERT, self).__init__(config)\n self.slot_num_labels = slot_dim\n self.intent_num_labels = intent_dim\n self.device = device\n self.intent_weight = intent_weight if intent_weight is not None else torch.tensor([1.]*intent_dim)\n\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.context = context\n if context:\n self.intent_classifier = nn.Linear(2 * config.hidden_size, self.intent_num_labels)\n else:\n self.intent_classifier = nn.Linear(config.hidden_size, self.intent_num_labels)\n self.slot_classifier = nn.Linear(config.hidden_size, 
self.slot_num_labels)\n        self.intent_loss_fct = torch.nn.BCEWithLogitsLoss(pos_weight=self.intent_weight)\n        self.slot_loss_fct = torch.nn.CrossEntropyLoss()\n\n        self.init_weights()\n\n    def forward(self, word_seq_tensor, word_mask_tensor, tag_seq_tensor=None, tag_mask_tensor=None,\n                intent_tensor=None, context_seq_tensor=None, context_mask_tensor=None):\n        outputs = self.bert(input_ids=word_seq_tensor,\n                            attention_mask=word_mask_tensor)\n\n        sequence_output = outputs[0]\n        pooled_output = outputs[1]\n\n        sequence_output = self.dropout(sequence_output)\n        slot_logits = self.slot_classifier(sequence_output)\n        outputs = (slot_logits,)\n\n        if self.context and context_seq_tensor is not None:\n            context_output = self.bert(input_ids=context_seq_tensor, attention_mask=context_mask_tensor)[1]\n            pooled_output = torch.cat([context_output, pooled_output], dim=-1)\n        pooled_output = self.dropout(pooled_output)\n        intent_logits = self.intent_classifier(pooled_output)\n        outputs = outputs + (intent_logits,)\n\n        if tag_seq_tensor is not None:\n            active_tag_loss = tag_mask_tensor.view(-1) == 1\n            active_tag_logits = slot_logits.view(-1, self.slot_num_labels)[active_tag_loss]\n            active_tag_labels = tag_seq_tensor.view(-1)[active_tag_loss]\n            slot_loss = self.slot_loss_fct(active_tag_logits, active_tag_labels)\n\n            outputs = outputs + (slot_loss,)\n\n        if intent_tensor is not None:\n            intent_loss = self.intent_loss_fct(intent_logits, intent_tensor)\n            outputs = outputs + (intent_loss,)\n\n        return outputs  # slot_logits, intent_logits, (slot_loss), (intent_loss),\n
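\n# Minimal usage sketch (hypothetical tensors and label sizes, not from the original repo):\n# from transformers import BertConfig\n# config = BertConfig.from_pretrained('bert-base-uncased')\n# model = JointBERT(config, 'cpu', slot_dim=10, intent_dim=5)\n# slot_logits, intent_logits = model(word_seq_tensor, word_mask_tensor)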
","sub_path":"convlab/modules/nlu/multiwoz/bert/jointBERT.py","file_name":"jointBERT.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"133135801","text":"#\n# history\n#\n# 2020/05/27 added the possibility of asking for a changeable string\n#            that is not in the sub_parts_dictionary to support\n#            change_one_occurrence_in_parts. (jbs)\n#\n\n\n\n\n\nimport logging\n\nfrom text_transformer.tt_unchangeable_text_sub_part import TTUnchangeableTextSubPart\nfrom text_transformer.tt_changeable_text_sub_part import TTChangeableTextSubPart\n\n\n\n\n\nclass TTTextPart:\n    \"\"\"A TTTextPart is a sequence of TTUnchangeableTextSubPart's and\n    TTChangeableTextSubPart's.\n\n    \"\"\"\n\n\n\n\n\n    #def __init__(self, text_string, text_transformer):\n    def __init__(self, text_string):\n\n        self.sub_parts_list = [TTUnchangeableTextSubPart(text_string)]\n\n        #self.transformer = text_transformer\n\n        # keys are the substrings in the original string to be\n        # transformed (changeable strings); values are lists with\n        # sub-part occurrence objects\n        self.sub_parts_dictionary = {}\n\n\n\n\n\n    def to_string(self):\n\n        result = \"\"\n\n        for sub_part in self.sub_parts_list:\n\n            result = result + sub_part.to_string()\n\n        return result\n\n\n\n\n    \n    def get_string_sub_parts(self, original_text, changeable_string):\n\n        split_text = original_text.split(changeable_string)\n\n        sub_parts_list = []\n\n        for element in split_text[0:-1]:\n\n            sub_part = TTUnchangeableTextSubPart(element)\n            sub_parts_list.append(sub_part)\n\n            sub_part = TTChangeableTextSubPart(changeable_string)\n            sub_parts_list.append(sub_part)\n            #self.transformer.append_sub_part(changeable_string, sub_part)\n            self.sub_parts_dictionary[changeable_string].append(sub_part)\n\n        last_element = split_text[-1]\n        sub_part = TTUnchangeableTextSubPart(last_element)\n        sub_parts_list.append(sub_part)\n\n        return sub_parts_list\n    \n\n\n\n\n    def create_sub_parts(self, changeable_string):\n\n        logging.debug(\"TTTextPart: create_sub_parts original_string = \\\"\"\n                      + changeable_string + \"\\\"\")\n\n        new_sub_parts_list = []\n        self.sub_parts_dictionary[changeable_string] = []\n\n        for sub_part in self.sub_parts_list:\n\n            if isinstance(sub_part, TTUnchangeableTextSubPart):\n                original_text = sub_part.to_string()\n                sub_parts_list = self.get_string_sub_parts(original_text,\n                                                           changeable_string)\n                new_sub_parts_list.extend(sub_parts_list)\n            \n            elif isinstance(sub_part, TTChangeableTextSubPart):\n\n                new_sub_parts_list.append(sub_part)\n\n            else:\n                raise Exception(\"TTTextPart this should be impossible to reach.\")\n\n        self.sub_parts_list = new_sub_parts_list\n\n\n\n\n\n    # def get_sub_parts_list(self):\n\n    #     return self.sub_parts_list\n\n\n\n\n\n    def get_changeable_sub_parts(self, changeable_string):\n\n        # added the possibility of asking for a changeable string that\n        # is not in the sub_parts_dictionary to support\n        # change_one_occurrence_in_parts\n        if changeable_string in self.sub_parts_dictionary:\n            \n            return self.sub_parts_dictionary[changeable_string]\n\n        else:\n\n            return []\n\n\n\n\n\n    def transform(self, original_string, new_string):\n\n        #print('|||||||| part transform')\n\n        for sub_part in self.sub_parts_list:\n\n            if (isinstance(sub_part, TTChangeableTextSubPart) and\n                sub_part.get_original_text() == original_string):\n\n                #print('|||||||| sub part transform')\n\n                sub_part.transform(new_string)\n            \n\n\n\n\n    def get_original_text(self):\n\n        result = \"\"\n\n        for sub_part in self.sub_parts_list:\n\n            result = result + sub_part.get_original_text()\n\n        return result\n    \n","sub_path":"03_Implementação/qom_questions_transformer/text_transformer/tt_text_part.py","file_name":"tt_text_part.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"607073670","text":"#armstrong numbers\nt= int(input())\nwhile t:\n    num = int(input())\n    n = num\n    s = 0\n    order = 3\n\n    while n>0:\n        d = n%10\n        s = s + (d**order)\n        n = int(n /10)\n    if s == num:\n        print('Yes')\n    else:\n        print('No')\n    t= t-1\n\n    ","sub_path":"Mathematical&AlgorithmPuzzle/armstrong.py","file_name":"armstrong.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"652254093","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport sys\n\ntry:\n    from PySide.QtCore import *\n    from PySide.QtGui import *\nexcept:\n    print (\"Error: This program needs PySide module.\", file=sys.stderr)\n    sys.exit(1)\n\nclass TermWidget(QTextEdit):\n
    \"\"\" A terminal-like Widget \"\"\"\n\n    # ~ TODO: finalize backward (history) handling\n    read = Signal(str)\n\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        # self.setGeometry(0, 0, 100, 200)\n        self.setWordWrapMode(QTextOption.WrapAnywhere)\n        self.read.connect(self.updateHistory)\n        self.setColor(\"black\", \"lightgray\")\n        self.setMinimumHeight(100)\n        self.reset()\n\n    def reset(self):\n        \"\"\" Clean all text \"\"\"\n        self.i = 0\n        self.hpos = 0\n        self.history = []\n        self.startCursor = self.textCursor()\n        self.displayPrompt()\n        # missing reset current text\n\n    def setColor(self, textColor, baseColor):\n        palette = QPalette()\n        palette.setColor(QPalette.Text, textColor)\n        palette.setColor(QPalette.Base, baseColor)\n        self.setPalette(palette)\n\n    def freezeAtCurrentPos(self):\n        self.moveCursor(QTextCursor.End)\n        self.startCursor = self.textCursor().position()\n\n    @Slot(str)\n    def out(self, s):\n        self.append(s + '\\n')\n        self.freezeAtCurrentPos()\n\n    @Slot(str)\n    def updateHistory(self, line):\n        self.history.append(line)\n        self.hpos = len(self.history)\n\n    def histNext(self):\n        if self.hpos < len(self.history): self.hpos += 1\n        return self.history[self.hpos] if 0 <= self.hpos < len(self.history) else \"\"\n\n    def histPrev(self):\n        if self.hpos >= 0: self.hpos -= 1\n        return self.history[self.hpos] if 0 <= self.hpos < len(self.history) else \"\"\n\n    def displayPrompt(self):\n        self.i += 1\n        self.insertPlainText(\"[{:d}]> \".format(self.i))\n        self.freezeAtCurrentPos()\n\n    def eraseLine(self):\n        cursor = self.textCursor()\n        cursor.movePosition(QTextCursor.End)\n        cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,\n                            len(self.document().toPlainText()) - self.startCursor)\n        cursor.removeSelectedText()\n\n    def keyPressEvent(self, event):\n        if self.textCursor().position() < self.startCursor:\n            return\n\n        if event.key() == Qt.Key_Return:\n            line = self.document().toPlainText()[self.startCursor:]\n            self.freezeAtCurrentPos()\n            self.read.emit(line)\n            # self.out(line) # hook\n            self.displayPrompt()\n        elif event.key() == Qt.Key_Up:\n            # ~ History up\n            self.eraseLine()\n            self.insertPlainText(self.histPrev())\n        elif event.key() == Qt.Key_Down:\n            # ~ History down\n            self.eraseLine()\n            self.insertPlainText(self.histNext())\n        elif event.key() == Qt.Key_Backspace or event.key() == Qt.Key_Left:\n            # ~ Ensure Backspace not erasing other lines\n            if self.textCursor().position() > self.startCursor:\n                super().keyPressEvent(event)\n        else:\n            super().keyPressEvent(event)\n
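\n\nif __name__ == '__main__':\n    # Minimal manual demo (an illustrative addition, assumes a working PySide install):\n    # every entered line is echoed back into the widget.\n    app = QApplication(sys.argv)\n    term = TermWidget()\n    term.read.connect(term.out)\n    term.show()\n    sys.exit(app.exec_())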
","sub_path":"src/cm_terminal.py","file_name":"cm_terminal.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"428014102","text":"import numpy as np \nimport matplotlib.pyplot as plt\n\n\ndef showArray (array_path1,array_path2):\n    for i in range (len(array_path1)):\n        path1 = np.load(array_path1[i],mmap_mode = 'r+')\n        path2 = np.load(array_path2[i],mmap_mode = 'r+')\n        fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(20, 10))\n        ax[0].imshow(path1, cmap=plt.cm.gray)\n        ax[1].imshow(path2, cmap=plt.cm.gray)\n    \n    \ndef loadAll (array_path):\n    im = []\n    for i in range (len(array_path)):\n        im.append(np.load(array_path[i]))\n        # print(\"successfully loaded\", array_path[i])\n    # print(\"successfully loaded all\")\n    return im\n\ndef findXY (matrix):\n    row,col = matrix.shape\n    lst = []\n    for r in range(row):\n        for c in range(col):\n            if matrix[r][c] == 1:\n                lst.append((r,c))\n    return lst","sub_path":"Fluoro_Segmentation/myfunctions.py","file_name":"myfunctions.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"560736910","text":"#!/usr/bin/python\n#coding=utf-8\nimport urllib2\nimport HTMLParser\nimport sys\nimport xml.dom.minidom as minidom\nfrom htmlentitydefs import entitydefs\nimport glob\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nclass MyParser(HTMLParser.HTMLParser):\n\tdef __init__(self):\n\t\tHTMLParser.HTMLParser.__init__(self)\n\t\tself.hasname = False\n\t\tself.namelist = []\n\tdef handle_starttag(self, tag, attrs):\n\t\tif tag == 'div':\n\t\t\tfor key, value in attrs:\n\t\t\t\tif key == 'class':\n\t\t\t\t\tif value == \"views-field views-field-title\":\n\t\t\t\t\t\tself.hasname = True\n\tdef handle_data(self, text):\n\t\tif self.hasname and text.isspace() == False:\n\t\t\tself.namelist.append(text)\n\tdef handle_endtag(self, tag):\n\t\tif tag == 'strong' and self.hasname == True:\n\t\t\tself.hasname = False\n\nfout_xml = file('UMN.xml', 'w')\ndoc = minidom.Document()\ninstitution = doc.createElement(\"institution\")\ndoc.appendChild(institution)\n\nif True:\n\trootUrl = 'https://www.cs.umn.edu/people/faculty'\n\tresponse = urllib2.urlopen(rootUrl)\n\thtml = response.read()\n\tmy = MyParser()\n\tmy.feed(html)\n\tfor i in range(len(my.namelist)):\n\t\tprofessor = doc.createElement(\"professor\")\n\t\tname = my.namelist[i]\n\t\tnamenode = doc.createElement(\"name\")\n\t\tnamenode.appendChild(doc.createTextNode(name))\n\t\tprofessor.appendChild(namenode)\n\t\tinstitution.appendChild(professor)\n\ndoc.writexml(fout_xml, \"\\t\", \"\\t\", \"\\n\")\nfout_xml.close()\n","sub_path":"liqian/UMN/scrap_professors.py","file_name":"scrap_professors.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"623389928","text":"\"\"\"\n    Sends information to a web service\n\"\"\"\n\nimport requests\nimport config as config\n\n\ndef getIssueInfo(id):\n\n    url = config.MANTIS_API + 'issues/' + str(id)\n    headers = {\"Authorization\": config.TOKEN}\n    x = requests.get(url, headers=headers)\n    return x.json()\n\n\ndef updateIssue(id, updateInfo):\n\n    url = config.MANTIS_API + 'issues/' + str(id)\n    headers = {\"Authorization\": config.TOKEN,\n               \"Content-Type\": \"application/json\"}\n    print(updateInfo)\n    x = requests.patch(url, json=updateInfo, headers=headers)\n    print(x)\n\n\ndef createIssue(issueJson):\n\n    url = config.MANTIS_API + 'issues/'\n    authHeaders = {\"Authorization\": config.TOKEN,\n                   \"Content-Type\": \"application/json\"}\n    print(issueJson)\n    x = requests.post(url, json = issueJson, headers = authHeaders)\n    print(x.text)\n\n\n","sub_path":"issues.py","file_name":"issues.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"567331199","text":"# imports\n\nimport os\nimport imageio\nimport numpy as np\nfrom skimage import io\nfrom skimage.color import rgb2gray\nimport matplotlib.pyplot as plt\nimport math\nfrom PIL import Image\nfrom skimage.filters import threshold_niblack, threshold_sauvola\nfrom skimage.transform import hough_line, hough_line_peaks\ntry:\n    get_ipython().run_line_magic('matplotlib', 'inline')  # notebook-only magic; skipped when run as a script\nexcept NameError:\n    pass\n\n\n\n# get grayscale image\ndef readImg(filePath):\n    imgOr = imageio.imread(filePath)\n    img = rgb2gray(imgOr)\n    return(img)\n\n\n# get adaptive threshold\n
def doThresholding(method, window_size, k, img):\n    if method == 'niblack':\n        th_mask = threshold_niblack(img, window_size=window_size, k=k)\n    elif method == 'sauvola':\n        th_mask = threshold_sauvola(img, window_size=window_size)\n\n    binary_mask = img < th_mask\n    return(binary_mask)\n\ndef applyHough(nAngles, img):\n    # hough\n    # Classic straight-line Hough transform\n    # Set a precision of 0.5 degree.\n    tested_angles = np.linspace(-np.pi / 2, np.pi / 2, nAngles)\n\n    h, theta, d = hough_line(img, theta=tested_angles)\n    return(h, theta, d)\n\ndef getYs(sizeRef, h, theta, d):\n    origin = np.array((0, sizeRef))\n    allY0s = []\n    allY1s = []\n    for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):\n        y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n        allY0s.append(y0)\n        allY1s.append(y1)\n    return(allY0s, allY1s)\n\ndef getLines(allY0s, allY1s, shapeIm):\n    allRowsReturn = []\n    allColsReturn = []\n    nPoints = int(round(math.sqrt(shapeIm[0]**2+shapeIm[1]**2)))\n    \n    for i in range(0, len(allY0s)):\n        y0 = allY0s[i]\n        y1 = allY1s[i]\n        x0, x1, y0, y1 = getCoords(y0, y1, shapeIm)\n        \n        allRows, allCols = getIdxsArray(x0, x1, y0, y1, nPoints) \n        allColsReturn.append(allCols)\n        allRowsReturn.append(allRows)\n    return(allRowsReturn, allColsReturn)\n\ndef getIdxsArray(x0, x1, y0, y1, nPoints):  # nPoints is passed in; it is not a global\n    #this converts plot coordinates to array coordinates\n    #linspace to get all values between the plot ([x0, y0], [x1, y1]) coordinates\n    allRows = list(map(int, np.ndarray.tolist(np.round(np.linspace(y0, y1, nPoints),0))))\n    allCols = list(map(int, np.ndarray.tolist(np.round(np.linspace(x0, x1, nPoints),0))))\n    return(allRows, allCols)\n    \ndef getCoords(y0, y1, shapeIm):\n    origin = np.array((0, shapeIm[1]))\n    if y0 < 0 or y1 > shapeIm[0]:\n        # this deals with vertical lines, using the equation of a line to\n        # get the corresponding points fitting inside the figure coordinates space\n        mLine = (y0-y1)/(origin[0]-origin[1])\n        qLine = y0\n        #when y=0 and y = (img height)-1\n        y0 = 0\n        y1 = shapeIm[0]-2\n        x0 = int(round((y0-qLine)/mLine))\n        x1 = int(round((y1-qLine)/mLine))\n    else :\n        x0 = 0\n        x1 = shapeIm[1]-1\n        y1 = y1-2\n        # ugly piece of code to deal with going out of array index\n        if y0 > shapeIm[0]-1:\n            y0 = shapeIm[0]-1\n        if y1 > shapeIm[0]-1:\n            y1 = shapeIm[0]-1\n    \n    return(x0, x1, y0, y1)\n\ndef checkLines(filename, window_size, k, nAngles):\n    img = readImg(filename)\n\n    thImg = doThresholding('niblack', window_size, k, img)\n\n    h, theta, d = applyHough(nAngles, thImg)\n\n    allY0s, allY1s = getYs(thImg.shape[1], h, theta, d)\n\n    return(allY0s, allY1s)\n\n\n
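# Minimal usage sketch (illustrative only; file name and parameter values are placeholders):\n# y0s, y1s = checkLines('grid_scan.png', window_size=25, k=0.8, nAngles=360)\n# print('candidate grid lines:', len(y0s))\n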
","sub_path":"alternative_solutions/imageAnalysisGrid.py","file_name":"imageAnalysisGrid.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"46491605","text":"#!/gpfsm/dulocal/sles11/other/SLES11.3/miniconda3/2019.03_py3.7/2019-05-15/bin/python\n\nimport numpy as np\nimport csv\nimport sys\nimport datetime as dt\n\n#########################################################################\n######## CHANGE THIS TO BE A RELATIVE PATHWAY TO THE INIT_DIR      ######\n#########################################################################\nproj_dir = '/discover/nobackup/jframe/p05noahgpr/nasa-intern-poster/'\ninit_dir = proj_dir + 'soil_moisture/init_dirs/2_2000-2004/'\n\n######### Set up some lists for the data ################\nx = []\nObservedSM = []\nnoahSM = []\nNoahGPR = []\nOneStpSM = []\nassimSM = []\n\n######## import all the data ##########################\nwith open('output.out', 'r') as gprFile:\n    gprData = csv.reader(gprFile, delimiter=' ', skipinitialspace=True)\n    for gprRow in gprData:\n        NoahGPR.append(float(gprRow[10]))\n    NoahGPR = np.array(NoahGPR)\nwith open(init_dir + 'obs.txt', 'r') as obsFile:\n    obsData = csv.reader(obsFile, delimiter=' ', skipinitialspace=True)\n    for obsRow in obsData:\n        ObservedSM.append(float(obsRow[5]))\n    ObservedSM = np.array(ObservedSM)\nwith open(init_dir + 'output.noah', 'r') as oFile:\n    fData = csv.reader(oFile, delimiter=' ', skipinitialspace=True )\n    for row in fData:\n        D = dt.datetime(int(row[0]),1,1) + \\\n            dt.timedelta((int(row[1])-1) + float(row[2])/24)\n        x.append(D)\n        noahSM.append(float(row[10]))\n    noahSM = np.array(noahSM)\n    x = np.array(x)\nwith open(init_dir + 'output.onestep', 'r') as oFile:\n    fData = csv.reader(oFile, delimiter=' ', skipinitialspace=True )\n    for row in fData:\n        OneStpSM.append(float(row[10]))\n    OneStpSM = np.array(OneStpSM)\nwith open(init_dir + 'output.dassim', 'r') as oFile:\n    fData = csv.reader(oFile, delimiter=' ', skipinitialspace=True )\n    for row in fData:\n        assimSM.append(float(row[10]))\n    assimSM = np.array(assimSM)\n\n\n################# Calculate some statistics #### \na = 3\nb = 17520\n# Mean divergence\nMD = np.sum((np.mean(ObservedSM[a:b]) - ObservedSM[a:b])**2)\n#Sum of squared error\nSSE_noah_only = np.sum((ObservedSM[a:b] - noahSM[a:b])**2)\nSSE_noah_gpr = np.sum((ObservedSM[a:b] - NoahGPR[a:b])**2)\nSSE_noah_assim = np.sum((ObservedSM[a:b] - assimSM[a:b])**2)\n#Nash sutcliffe efficiency\nNS_noah_gpr = 1 - (SSE_noah_gpr/MD)\nNS_noah_assim = 1 - (SSE_noah_assim/MD)\nNS_noah_only = 1 - (SSE_noah_only/MD)\nprint('The Nash-Sutcliffe Efficiency for the test year (2005) is')\nprint('For Noah alone: ', str(NS_noah_only))\nprint('For Noah with Data Assimilation: ', str(NS_noah_assim))\nprint('For Noah with GPR: ', str(NS_noah_gpr))\n# Root mean squared error\nprint('The root mean squared errors for the test year (2005) are')\nprint('For Noah alone: ', str(np.sqrt(SSE_noah_only/(b-a))))\nprint('For Noah with Data Assimilation: ', str(np.sqrt(SSE_noah_assim/(b-a))))\nprint('For Noah with GPR: ', str(np.sqrt(SSE_noah_gpr/(b-a))))\n\n# END\n","sub_path":"setup_dir/gpr/print_GPR_test_results.py","file_name":"print_GPR_test_results.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"462889141","text":"from socket import *\n\nserverPort = 12000\n#192.168.50.66\nserverIp = '192.168.50.66'\nserverSocket = socket(AF_INET, SOCK_STREAM)\nserverSocket.bind((serverIp, serverPort))\nserverSocket.listen(1)\nprint('tcp server started\\n')\nwhile True:\n    connectSock, clientAddr = serverSocket.accept()\n    message = connectSock.recv(1024).decode()\n    print('receive msg:', message, 'from:', clientAddr)\n    modifiedMsg = message.upper()\n    connectSock.send(modifiedMsg.encode())\n    connectSock.close()\n","sub_path":"tcp_study/TCPServer.py","file_name":"TCPServer.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"43821501","text":"\n\n# FROM https://stackoverflow.com/questions/3093352/python-object-assignment-with-variable-argument-list\n@std.route('/tree_node', methods=['GET', 'POST'])\n@login_required\ndef tree_node(parent_gt, family_tree):\n\n    global gt_not_in_gt_type\n    global sts_color_set\n    global gt_done\n    global num_of_nodes\n\n    print(\"PARENT ROOT - GT \", parent_gt)\n\n    j_tree = { \n        \"class_name\": parent_gt.class_name,
\n        \"gt_type\": parent_gt.gt_type, \n        \"id\": parent_gt.id,\n        \"title\": parent_gt.title,\n        \"body\": parent_gt.body ,\n        \"h_name\": parent_gt.h_name ,\n        \"e_name\": parent_gt.e_name ,\n        \"color\": parent_gt.color ,\n        \"image_url\": parent_gt.image_url,\n        #\"sts_color\": parent_gt.sts_color,\n        #\"sts_title\": parent_gt.sts_title,\n\n        \"CHILDREN\" : []\n    }\n    \n    gt_done.append(parent_gt.id)\n    db.session.commit()\n    num_of_nodes = num_of_nodes+1 \n    print(\"N \", num_of_nodes)\n    print(\"\")\n\n    if parent_gt.children is None or parent_gt.children == []:\n        print(\"  GT {0} HAS NO CHILDREN\".format(parent_gt))\n        print(\"\")\n    \n    for child_gt in (parent_gt.children or []):  # guard: children may be None\n        \n        if child_gt is None or child_gt == []: \n            print(\"\")\n            continue\n        \n        if child_gt.class_name == 'Status':\n            sts_color = child_gt.color\n            sts_title = child_gt.title\n            sts_color_set = 1\n            print(\"CHILD IS STATUS TYPE \")\n            print(\"\")\n            continue\n\n        if child_gt.id in gt_done:\n            print(\"NOT DOING {0} IT IS ALREADY DONE AND IT IS IN {1}\".format(child_gt, gt_done) )\n            print(\"\")\n            continue\n        \n        if child_gt.class_name == 'Person' or child_gt.class_name == 'Situation' or child_gt.class_name == 'Thought' or child_gt.class_name == 'Emotion' or child_gt.class_name == 'Behavior' or child_gt.class_name == 'Result':\n            child_gt.gt_type = 'CBT'\n            path_base = 'CBT'\n        else:\n            child_gt.gt_type = child_gt.class_name\n            gt_not_in_gt_type.append(child_gt.id)\n            continue\n        \n        db.session.commit()\n        \n        if child_gt.gt_type != family_tree or child_gt.id in gt_not_in_gt_type:\n            print(\"Not DOING gt {0} it is NOT IN FAMILY TYPE {1} and it is in {2}: \".format(child_gt, family_tree, gt_not_in_gt_type ))\n            print(\"\")\n            continue\n        \n        print(\"DOING CHILD - GT IS \", child_gt)\n\n        j_tree[\"CHILDREN\"].append( { \n            \"class_name\": child_gt.class_name, \n            \"gt_type\": child_gt.gt_type, \n            \"id\": child_gt.id,\n            \"title\": child_gt.title,\n            \"body\": child_gt.body ,\n            \"h_name\": child_gt.h_name ,\n            \"e_name\": child_gt.e_name ,\n            \"color\": child_gt.color ,\n            \"image_url\": child_gt.image_url,\n            #\"sts_color\": child_gt.sts_color,\n            #\"sts_title\": child_gt.sts_title,\n\n            \"CHILDREN\" : []\n            } )\n        \n        if sts_color_set:\n            # j_tree is a dict, so use update() rather than append()\n            j_tree.update( {\"sts_color\": sts_color, \"sts_title\": sts_title} )\n            sts_color_set = 0\n\n        gt_done.append(child_gt.id)\n        db.session.commit()\n        num_of_nodes = num_of_nodes+1\n        \n        \n\n    print(\"N \", num_of_nodes)\n    print(\"\") \n    return j_tree\n\n","sub_path":"app/students/backup/j_tree.py","file_name":"j_tree.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"263780080","text":"from os import environ as ENV\n\nmaxN = 50\nnSpacing = 3\n\ndef vnlc(n, N):\n    ret = ''\n    if n < (N - 1):\n        ret = ', '\n\n    if n != (N - 1) and ((n + 1) % nSpacing) == 0:\n        ret += '\\n          '\n\n    return ret;\n\ndef nlc(n, N):\n    ret = ''\n    if n < (N - 1):\n        ret = ', '\n\n    if n != (N - 1) and ((n + 1) % nSpacing) == 0:\n        ret += '\\n        '\n\n    return ret;\n\ndef runFunctionFromArguments(N):\n    return 'switch(argc){\\n' + '\\n'.join([runFunctionFromArgument(n + 1) for n in xrange(N)]) + '}'\n\ndef runFunctionFromArgument(N):\n    return '    case ' + str(N) + \"\"\":\n      f(occaKernelInfoArgs, occaInnerId0, occaInnerId1, occaInnerId2, \"\"\" + ', '.join(['args[{0}]'.format(n) for n in xrange(N)]) + \"\"\"); break;\"\"\"\n\ndef runKernelFromArguments(N):\n    return 'switch(argc){\\n' + '\\n'.join([runKernelFromArgument(n + 1) for n in xrange(N)]) + '}'\n\ndef runKernelFromArgument(N):\n    return '    case ' + str(N)
+ \"\"\":\n if(kHandle->nestedKernelCount == 0){\n (*kHandle)(\"\"\" + ', '.join(['args[{0}]'.format(n) for n in xrange(N)]) + \"\"\");\n }\"\"\" + ((\"\"\"\n else{\n (*kHandle)(kHandle->nestedKernels, \"\"\" + ', '.join(['args[{0}]'.format(n) for n in xrange(N)]) + \"\"\");\n }\"\"\") if (N < maxN) else '') + \"\"\"\n break;\"\"\"\n\ndef virtualOperatorDeclarations(N):\n return '\\n\\n'.join([virtualOperatorDeclaration(n + 1) for n in xrange(N)])\n\ndef virtualOperatorDeclaration(N):\n return ' virtual void operator () ({0}) = 0;'.format( ' '.join(['const kernelArg &arg' + str(n) + vnlc(n, N) for n in xrange(N)]) )\n\ndef operatorDeclarations(mode, N):\n return '\\n\\n'.join([operatorDeclaration(mode, n + 1) for n in xrange(N)])\n\ndef operatorDeclaration(mode, N):\n if mode == 'Base':\n ret = ' void operator () ({0});'.format( ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) )\n else:\n ret = ' template <>\\n'\\\n + ' void kernel_t<{0}>::operator () ({1});'.format(mode, ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) )\n\n return ret\n\ndef operatorDefinitions(mode, N):\n return '\\n\\n'.join([operatorDefinition(mode, n + 1) for n in xrange(N)])\n\ndef operatorDefinition(mode, N):\n if mode == 'Base':\n return \"\"\" void kernel::operator() (\"\"\" + ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\"){\n \"\"\" + '\\n '.join(['arg' + str(n) + '.setupForKernelCall(kHandle->metaInfo.argIsConst(' + str(n) + '));' for n in xrange(N)]) + \"\"\"\n\n if(kHandle->nestedKernelCount == 0){\n (*kHandle)(\"\"\" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\");\n }\n else{\"\"\" + ((\"\"\"\n (*kHandle)(kHandle->nestedKernels, \"\"\" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\");\"\"\") \\\n if (N < maxN) else '') + \"\"\"\n }\n }\n\n void kernelDatabase::operator() (\"\"\" + ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\"){/*\n occa::device_v *launchDevice = NULL;\n\n if(arg0.dHandle) launchDevice = const_cast(arg0.dHandle);\n \"\"\" + ' '.join([('else if(arg' + str(n + 1) + '.dHandle) launchDevice = const_cast(arg' + str(n + 1) + '.dHandle);\\n') for n in xrange(N - 1)]) + \"\"\"\n (*this)[launchDevice](\"\"\" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\");*/\n }\"\"\"\n else:\n header = operatorDefinitionHeader(mode, N)\n return header + operatorModeDefinition[mode](N) + \"\\n }\"\n\ndef operatorDefinitionHeader(mode, N):\n return \"\"\" template <>\n void kernel_t<{0}>::operator () ({1}){{\"\"\".format(mode, ' '.join(['const kernelArg &arg' + str(n) + nlc(n, N) for n in xrange(N)]))\n\ndef pthreadOperatorDefinition(N):\n return \"\"\"\n PthreadsKernelData_t &data_ = *((PthreadsKernelData_t*) data);\n\n kernelArg args[\"\"\" + str(N) + \"\"\"] = {\"\"\" + ' '.join(['arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\"};\n\n pthreads::runFromArguments(data_, dims, inner, outer, \"\"\" + str(N) + \"\"\", args);\"\"\"\n\ndef serialOperatorDefinition(N):\n return \"\"\"\n SerialKernelData_t &data_ = *((SerialKernelData_t*) data);\n handleFunction_t tmpKernel = (handleFunction_t) data_.handle;\n int occaKernelArgs[6];\n\n occaKernelArgs[0] = outer.z;\n occaKernelArgs[1] = outer.y;\n occaKernelArgs[2] = outer.x;\n occaKernelArgs[3] = inner.z;\n occaKernelArgs[4] = inner.y;\n occaKernelArgs[5] = inner.x;\n\n int argc = 0;\n\n const kernelArg *args[\"\"\" + str(N) + \"\"\"] = {\"\"\" + ' '.join(['&arg' + str(n) + 
nlc(n, N) for n in xrange(N)]) + \"\"\"};\n\n for(int i = 0; i < \"\"\" + str(N) + \"\"\"; ++i){\n for(int j = 0; j < args[i]->argc; ++j){\n data_.vArgs[argc++] = args[i]->args[j].ptr();\n }\n }\n\n int occaInnerId0 = 0, occaInnerId1 = 0, occaInnerId2 = 0;\n\n cpu::runFunction(tmpKernel,\n occaKernelArgs,\n occaInnerId0, occaInnerId1, occaInnerId2,\n argc, data_.vArgs);\"\"\"\n\ndef ompOperatorDefinition(N):\n return \"\"\"\n OpenMPKernelData_t &data_ = *((OpenMPKernelData_t*) data);\n handleFunction_t tmpKernel = (handleFunction_t) data_.handle;\n int occaKernelArgs[6];\n\n occaKernelArgs[0] = outer.z;\n occaKernelArgs[1] = outer.y;\n occaKernelArgs[2] = outer.x;\n occaKernelArgs[3] = inner.z;\n occaKernelArgs[4] = inner.y;\n occaKernelArgs[5] = inner.x;\n\n int argc = 0;\n\n const kernelArg *args[\"\"\" + str(N) + \"\"\"] = {\"\"\" + ' '.join(['&arg' + str(n) + nlc(n, N) for n in xrange(N)]) + \"\"\"};\n\n for(int i = 0; i < \"\"\" + str(N) + \"\"\"; ++i){\n for(int j = 0; j < args[i]->argc; ++j){\n data_.vArgs[argc++] = args[i]->args[j].ptr();\n }\n }\n\n int occaInnerId0 = 0, occaInnerId1 = 0, occaInnerId2 = 0;\n\n cpu::runFunction(tmpKernel,\n occaKernelArgs,\n occaInnerId0, occaInnerId1, occaInnerId2,\n argc, data_.vArgs);\"\"\"\n\ndef clOperatorDefinition(N):\n return \"\"\"\n OpenCLKernelData_t &data_ = *((OpenCLKernelData_t*) data);\n cl_kernel kernel_ = data_.kernel;\n\n occa::dim fullOuter = outer*inner;\n\n int argc = 0;\n\n const kernelArg *args[\"\"\" + str(N) + \"\"\"] = {\"\"\" + (', '.join((('\\n ' + (' ' if (10 <= N) else ''))\n if (n and ((n % 5) == 0))\n else '')\n + \"&arg{0}\".format(n) for n in xrange(N))) + \"\"\"};\n\n OCCA_CL_CHECK(\"Kernel (\" + metaInfo.name + \") : Setting Kernel Argument [0]\",\n clSetKernelArg(kernel_, argc++, sizeof(void*), NULL));\n\n for(int i = 0; i < \"\"\" + str(N) + \"\"\"; ++i){\n for(int j = 0; j < args[i]->argc; ++j){\n OCCA_CL_CHECK(\"Kernel (\" + metaInfo.name + \") : Setting Kernel Argument [\" << (i + 1) << \"]\",\n clSetKernelArg(kernel_, argc++, args[i]->args[j].size, args[i]->args[j].ptr()));\n }\n }\n\n OCCA_CL_CHECK(\"Kernel (\" + metaInfo.name + \") : Kernel Run\",\n clEnqueueNDRangeKernel(*((cl_command_queue*) dHandle->currentStream),\n kernel_,\n (cl_int) dims,\n NULL,\n (uintptr_t*) &fullOuter,\n (uintptr_t*) &inner,\n 0, NULL, NULL));\"\"\"\n\ndef cudaOperatorDefinition(N):\n return \"\"\"\n CUDAKernelData_t &data_ = *((CUDAKernelData_t*) data);\n CUfunction function_ = data_.function;\n\n int occaKernelInfoArgs = 0;\n int argc = 0;\n\n const kernelArg *args[\"\"\" + str(N) + \"\"\"] = {\"\"\" + (', '.join((('\\n ' + (' ' if (10 <= N) else ''))\n if (n and ((n % 5) == 0))\n else '')\n + \"&arg{0}\".format(n) for n in xrange(N))) + \"\"\"};\n\n data_.vArgs[argc++] = &occaKernelInfoArgs;\n\n for(int i = 0; i < \"\"\" + str(N) + \"\"\"; ++i){\n for(int j = 0; j < args[i]->argc; ++j){\n data_.vArgs[argc++] = args[i]->args[j].ptr();\n }\n }\n\n OCCA_CUDA_CHECK(\"Launching Kernel\",\n cuLaunchKernel(function_,\n outer.x, outer.y, outer.z,\n inner.x, inner.y, inner.z,\n 0, *((CUstream*) dHandle->currentStream),\n data_.vArgs, 0));\"\"\"\n\ndef coiOperatorDefinition(N):\n return \"\"\"\n COIKernelData_t &data_ = *((COIKernelData_t*) data);\n COIDeviceData_t &dData = *((COIDeviceData_t*) ((device_t*) dHandle)->data);\n int occaKernelArgs[6];\n\n occaKernelArgs[0] = outer.z;\n occaKernelArgs[1] = outer.y;\n occaKernelArgs[2] = outer.x;\n occaKernelArgs[3] = inner.z;\n occaKernelArgs[4] = inner.y;\n occaKernelArgs[5] = 
inner.x;\n\n uintptr_t kSize = sizeof(data_.kernel);\n\n ::memcpy(&(data_.hostArgv[0]) , &(data_.kernel) , kSize);\n ::memcpy(&(data_.hostArgv[kSize]), &(occaKernelArgs[0]), 6*sizeof(int));\n\n int hostPos = kSize + 6*sizeof(int) + \"\"\" + str(N) + \"\"\"*sizeof(int);\n\n int typePos = 0;\n int devicePos = 0;\n\n int *typePtr = (int*) (data_.hostArgv + kSize + 6*sizeof(int));\n\n \"\"\" + '\\n '.join([\"\"\"if(arg{0}.pointer){{\n typePtr[typePos++] = (int) ((1 << 31) + devicePos);\n data_.deviceArgv[devicePos] = *((coiMemory*) arg{0}.data());\n data_.deviceFlags[devicePos] = COI_SINK_WRITE;\n ++devicePos;\n }}\n else{{\n typePtr[typePos++] = (int) ((0 << 31) + hostPos);\n ::memcpy(&(data_.hostArgv[hostPos]), &(arg{0}.arg), arg{0}.size);\n hostPos += arg{0}.size;\n }}\"\"\".format(n) for n in xrange(N)]) + \"\"\"\n\n coiStream &stream = *((coiStream*) dHandle->currentStream);\n\n COIPipelineRunFunction(stream.handle,\n dData.kernelWrapper[\"\"\" + str(N - 1) + \"\"\"],\n devicePos,\n (const coiMemory*) data_.deviceArgv,\n (const coiMemoryFlags*) data_.deviceFlags,\n false,\n __null,\n data_.hostArgv,\n hostPos,\n __null,\n 0,\n &(stream.lastEvent));\"\"\"\n\ndef cOperatorDeclarations(N):\n return '\\n\\n'.join([cOperatorDeclaration(n + 1) for n in xrange(N)])\n\ndef cOperatorDeclaration(N):\n return ' OCCA_LFUNC void OCCA_RFUNC occaKernelRun{0}(occaKernel kernel, {1});\\n'.format(N, ' '.join(['void *arg' + str(n) + nlc(n, N) for n in xrange(N)]) )\n\ndef cOperatorDefinitions(N):\n return '\\n\\n'.join([cOperatorDefinition(n + 1) for n in xrange(N)])\n\ndef cOperatorDefinition(N):\n argsContent = ', '.join('(occaType_t*) arg' + str(n) for n in xrange(N))\n\n return (' void OCCA_RFUNC occaKernelRun{0}(occaKernel kernel, {1}){{\\n'.format(N, ' '.join(['void *arg' + str(n) + nlc(n, N) for n in xrange(N)]) ) + \\\n ' occa::kernel kernel_((occa::kernel_v*) kernel);\\n' + \\\n ' kernel_.clearArgumentList();\\n' + \\\n ' \\n' + \\\n ' occaType_t *args[' + str(N) + '] = {' + argsContent + '};\\n' + \\\n ' \\n' + \\\n ' for(int i = 0; i < ' + str(N) + '; ++i){\\n' + \\\n ' occaType_t &arg = *(args[i]);\\n' + \\\n ' void *argPtr = arg.value.data.void_;\\n' + \\\n ' \\n' + \\\n ' if(arg.type == OCCA_TYPE_MEMORY){\\n' + \\\n ' occa::memory memory_((occa::memory_v*) argPtr);\\n' + \\\n ' kernel_.addArgument(i, occa::kernelArg(memory_));\\n' + \\\n ' }\\n' + \\\n ' else if(arg.type == OCCA_TYPE_PTR){\\n' + \\\n ' occa::memory memory_((void*) argPtr);\\n' + \\\n ' kernel_.addArgument(i, occa::kernelArg(memory_));\\n' + \\\n ' }\\n' + \\\n ' else {\\n' + \\\n ' kernel_.addArgument(i, occa::kernelArg(arg.value));\\n' + \\\n ' delete (occaType_t*) args[i];\\n' + \\\n ' }\\n' + \\\n ' }\\n' + \\\n ' \\n' + \\\n ' kernel_.runFromArguments();\\n' + \\\n ' }\\n');\n\noperatorModeDefinition = { 'Serial' : serialOperatorDefinition,\n 'OpenMP' : ompOperatorDefinition,\n 'OpenCL' : clOperatorDefinition,\n 'CUDA' : cudaOperatorDefinition,\n 'Pthreads' : pthreadOperatorDefinition,\n 'COI' : coiOperatorDefinition }\n\noccaDir = ENV['OCCA_DIR']\n\nhpp = open(occaDir + '/include/occa/operators/virtualDeclarations.hpp', 'w')\nhpp.write(virtualOperatorDeclarations(maxN));\nhpp.write('\\n'); # Make sure there is a newline at the end of the file\nhpp.close()\n\nhpp = open(occaDir + '/include/occa/operators/declarations.hpp', 'w')\nhpp.write(operatorDeclarations('Base', maxN));\nhpp.write('\\n');\nhpp.close()\n\nhpp = open(occaDir + '/src/operators/definitions.cpp', 'w')\nhpp.write(operatorDefinitions('Base', 
maxN));\nhpp.write('\\n');\nhpp.close()\n\nhpp = open(occaDir + '/src/operators/runFunctionFromArguments.cpp', 'w')\nhpp.write(runFunctionFromArguments(maxN));\nhpp.write('\\n');\nhpp.close()\n\nhpp = open(occaDir + '/src/operators/runKernelFromArguments.cpp', 'w')\nhpp.write(runKernelFromArguments(maxN));\nhpp.write('\\n');\nhpp.close()\n\nfor mode in operatorModeDefinition:\n hpp = open(occaDir + '/include/occa/operators/' + mode + 'KernelOperators.hpp', 'w')\n hpp.write(operatorDeclarations(mode, maxN));\n hpp.write('\\n');\n hpp.close()\n\n cpp = open(occaDir + '/src/operators/' + mode + 'KernelOperators.cpp', 'w')\n cpp.write(operatorDefinitions(mode, maxN));\n cpp.write('\\n');\n cpp.close()\n\nhpp = open(occaDir + '/include/occa/operators/cKernelOperators.hpp', 'w')\nhpp.write(cOperatorDeclarations(maxN));\nhpp.write('\\n');\nhpp.close()\n\ncpp = open(occaDir + '/src/operators/cKernelOperators.cpp', 'w')\ncpp.write(cOperatorDefinitions(maxN));\ncpp.write('\\n');\ncpp.close()\n","sub_path":"scripts/setupKernelOperators.py","file_name":"setupKernelOperators.py","file_ext":"py","file_size_in_byte":15182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"552268545","text":"import librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport numpy as np\n\nfile = \"sweep_200hz-500hz.wav\"\n#file = \"sweep_200Hz_1000Hz_-3dBFS_1s.wav\"\n#file = \"heartbeat.wav\"\n#file = \"we_will_rock_you.wav\"\n\ny, sr = librosa.load(file)\n#librosa.feature.melspectrogram(y=y, sr=sr)\n\n# Using a pre-computed power spectrogram\n\nD = np.abs(librosa.stft(y))**2\nS = librosa.feature.melspectrogram(S=D)\n\n# Passing through arguments to the Mel filters\nS2 = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=int(sr/24), hop_length=int(sr/24), n_mels=20,\n )\n\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(10, 4))\nax1 = plt.subplot(2,1,1)\n\nlibrosa.display.specshow(librosa.power_to_db(S2, ref=np.max), y_axis='mel', x_axis='time')\n#plt.colorbar(format='%+2.0f dB')\nplt.title('Mel spectrogram (Heartbeat.wav)')\nplt.tight_layout()\n\n#plt.figure(figsize=(10, 4))\n#librosa.display.specshow(librosa.power_to_db(S, ref=np.max), y_axis=\"mel\", x_axis=\"time\")\n#plt.colorbar(format='%+2.0f dB')\n#plt.title('Regular spectrogram')\n#plt.tight_layout()\n\nax2 = plt.subplot(212)\nplt.plot(y)\nplt.title('Heartbeat.wav')\nplt.tight_layout()\nplt.xlabel(\"Samples (22055 samples per second)\")\nplt.ylabel(\"Amplitude\")","sub_path":"music/mfcc_test.py","file_name":"mfcc_test.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"315573938","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 13 16:24:19 2019\nThis is the face images cluster\n\n@author: DiXuanA\n\"\"\"\n\nimport sys\nimport os\nimport dlib\nimport glob\nimport cv2\nimport eduai_api as api\nfrom sklearn.cluster import DBSCAN\n\ncurrent_path = os.getcwd() \n\nface_folder = current_path + '/data/faces_from_video/' \noutput_folder = current_path + '/data/cluster_output/'\nprint(face_folder)\ndetector = api.get_face_detector()\nface_recognizer = api.get_face_recognizer()\nshape_detector = api.get_68_point_predictor()\n\ndescriptors = []\nimages = []\n\nfor f in glob.glob(os.path.join(face_folder, \"*.jpg\")):\n \n print('Processing file:{}'.format(f))\n img = cv2.imread(f)\n img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n dets = detector(img2, 1)\n \n for index, 
face in enumerate(dets):\n shape = shape_detector(img2, face)\n face_descriptor = face_recognizer.compute_face_descriptor(img2, shape)\n descriptors.append(face_descriptor)\n images.append((img2, shape))\n\n#clt = DBSCAN(eps=0.35,min_samples=2,metric = \"euclidean\")\n#labels = clt.fit_predict(descriptors)\n#labels = labels+1\n#images_number = len(descriptors) \n#print(\"Number of images : {}\".format(images_number)) \n\nlabels = dlib.chinese_whispers_clustering(descriptors, 0.4)\nprint(\"labels: {}\".format(labels))\nnum_classes = len(set(labels))\nprint(\"Number of clusters: {}\".format(num_classes))\n\nnoise = [i==0 for i in list(labels)].count(True)\n\nprint(\"Number of noise : {}\".format(noise))\nnum_classes = len(set(labels))\nprint(\"Number of clusters : {}\".format(num_classes))\nprint(\"labels: {}\".format(labels))\nface_dict = {}\nfor i in range(num_classes):\n face_dict[i] = []\n \nfor i in range(len(labels)):\n face_dict[labels[i]].append(images[i])\n \nfor key in face_dict.keys():\n file_dir = os.path.join(output_folder , \"ID000\" + str(key + 1))\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n \n for index, (image, shape) in enumerate(face_dict[key]):\n file_path = os.path.join(file_dir, 'face_' + str(index))\n dlib.save_face_chip(image, shape, file_path, size=150, padding=0.25)\n","sub_path":"face_cluster.py","file_name":"face_cluster.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"136080937","text":"from textwrap import dedent\n\nwelcome_message = \"\"\"**************************************\n** Welcome to the Snakes Cafe! **\n** Please see our menu below. **\n**\n** To quit at any time, type \"quit\" **\n**************************************\"\"\"\n\norder_message = \"\"\"\n***********************************\n** What would you like to order? **\n***********************************\n\"\"\"\n\nNA_message = \"\"\"\n***********************************\n** This item does not exist. 
Please enter a valid item from the menu **\n***********************************\n\"\"\"\n\nmenu_list = {\n \"Appetizers\": {\n \"Wings\": 0,\n \"Cookies\": 0,\n \"Spring Rolls\": 0\n },\n \"Entrees\": {\n \"Salmon\": 0,\n \"Steak\": 0,\n \"Meat Tornado\": 0,\n \"A Literal Garden\": 0\n },\n \"Desserts\": {\n \"Ice Cream\": 0,\n \"Cake\": 0,\n \"Pie\": 0\n },\n \"Drinks\": {\n \"Coffee\": 0,\n \"Tea\": 0,\n \"Soda\": 0\n },\n}\n\n\ndef print_menu():\n for menu in menu_list:\n print(dedent(f\"\"\"\n {menu}\n ****************\n \"\"\"))\n for key in menu_list[menu]:\n print(key)\n\n\ndef process_menu(menu_order):\n flag = False\n # normalise the order once, not once per menu section\n menu_order = menu_order.title()\n for menu in menu_list:\n current_menu = menu_list[menu]\n if menu_order in current_menu:\n quantity = current_menu[menu_order] + 1\n current_menu[menu_order] = quantity\n print(f\"Item ordered {menu_order} with quantity {quantity}\")\n flag = True\n return flag\n\nif __name__ == '__main__':\n print(welcome_message)\n print_menu()\n input_value = input(order_message)\n while input_value != \"quit\":\n if not process_menu(input_value):\n print(NA_message)\n input_value = input(order_message)\n\n","sub_path":"snakes_cafe.py","file_name":"snakes_cafe.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"30641206","text":"import logging\nimport subprocess\nfrom itertools import chain\n\nfrom . import homebrew\nfrom .utils import run\n\nlog = logging.getLogger(__name__)\n\n\ndef restart_os_functions(*args, **kwargs):\n for item in ('Finder', 'Dock', 'SystemUIServer'):\n cmd = ['killall', item]\n log.info(f\"Executing command: {cmd!r}\")\n subprocess.check_call(cmd)\n\n\nDEFAULTS_TYPE_MAP = {\n bool: 'bool',\n int: 'int',\n float: 'float',\n str: 'string',\n dict: 'dict',\n}\n\ndef flatten(value):\n # will throw exception for unknown type, which is fine\n result = [f'-{DEFAULTS_TYPE_MAP[type(value)]}']\n if isinstance(value, dict):\n result.extend(chain.from_iterable((k, *flatten(v)) for k, v in value.items()))\n else:\n result.append(str(value))\n\n return result\n\nclass _DefaultsDomain:\n def __init__(self, domain=None):\n self.domain = domain\n\n def __getitem__(self, key):\n if not self.domain: # still needs a domain\n return _DefaultsDomain(key)\n\n run([\"defaults\", \"read\", self.domain, key])\n\n def __setitem__(self, key, value):\n run([\"defaults\", \"write\", self.domain, key, *flatten(value)])\n\n\ndefaults = _DefaultsDomain()\ndefaults.g = defaults['-g']\n","sub_path":"HOME/bin/lib/mac.py","file_name":"mac.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47611061","text":"import sqlite3\nimport datetime\nfrom .model import Model\nimport config\n\n\nclass DatabaseMapper:\n conn = sqlite3.connect(\n config.database[\"full_path\"]\n )\n\n def __init__(self, table_name):\n self.cursor = self.conn.cursor()\n self.table_name = table_name\n\n def post(self, contents):\n \"\"\" Adds a new row to self.table_name\n\n Args:\n contents (dict): dictionary of contents to insert,\n structured as {column_name: value}\n\n Returns:\n Model for the new row in the table\n \"\"\"\n columns = tuple(contents.keys())\n values = list(contents.values())\n for i, value in enumerate(values):\n if isinstance(value, datetime.date):\n values[i] = str(value)\n # Parameterised placeholders let sqlite3 handle quoting, which also works\n # for single-value inserts and keeps user data out of the SQL text.\n placeholders = \", \".join(\"?\" for _ in values)\n sql = \"INSERT INTO {} ({}) VALUES ({});\".format(\n self.table_name, \", \".join(columns), placeholders)\n self._execute_sql(sql, tuple(values))\n id_val = self.cursor.lastrowid\n model = self.get(id_val)\n return model\n\n def get(self, id_val):\n sql = \"SELECT * FROM {} WHERE id = {} \\\n LIMIT 1;\".format(self.table_name, id_val)\n response = self._execute_sql(sql)\n response = response.fetchone()\n model = self._create_model(response)\n return model\n\n def str_to_datetime(self, date_str):\n date_time = datetime.datetime.strptime(\n date_str,\n \"%Y-%m-%d %H:%M\"\n )\n return date_time\n\n def _execute_sql(self, sql, params=()):\n response = self.cursor.execute(sql, params)\n self.conn.commit()\n return response\n\n def _create_model(self, value_tuple):\n # Column names come from the cursor's most recent SELECT.\n columns = [description[0] for description in self.cursor.description]\n contents = dict(zip(columns, value_tuple))\n model = Model(self.table_name, contents, self.conn)\n return model\n","sub_path":"src/database_manager/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"170783402","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport tkinter as tk\nfrom tkinter.ttk import Treeview\nfrom Data.dataBase import DataBase\nfrom Views.addContact import AddContactWindow\nfrom Views.aboutUs import AboutUsWindow\nimport sqlite3 as sql\n\n\ndef updateTreeview(rows):\n trv.delete(*trv.get_children())\n for i in rows:\n trv.insert(\"\", 'end', values=i)\n\n\ndef search():\n q2 = q.get()\n # parameterised LIKE keeps the user's search text out of the SQL string\n search_query = \"SELECT * FROM Contacts WHERE first_name LIKE ? or last_name LIKE ?\"\n cursor.execute(search_query, ('%' + q2 + '%', '%' + q2 + '%'))\n rows = cursor.fetchall()\n updateTreeview(rows)\n\n\ndb = sql.connect(\"phone_book.db\")\ncursor = db.cursor()\ncreate_table_query = \"CREATE TABLE IF NOT EXISTS Contacts (contact_id INTEGER PRIMARY KEY AUTOINCREMENT, first_name \" \\\n \"text, last_name text, \" \\\n \"email text, phone_number text) \"\n# make sure the table exists before the UI issues any queries\ncursor.execute(create_table_query)\n\n\n# testing insert:\n# insert_query = \"INSERT INTO Contacts(first_name, last_name, email, phone_number) VALUES(?,?,?,?)\"\n# cursor.execute(insert_query, (\"Jose\", \"Martinez\", \"Mjose@gmail.com\", \"809-325-2626\"))\n\ndef clear():\n clear_query = \"SELECT * FROM Contacts\"\n cursor.execute(clear_query)\n rows = cursor.fetchall()\n updateTreeview(rows)\n\n\ndef clear_text():\n search_entry.delete(0, 'end')\n id_entry.delete(0, 'end')\n entry1.delete(0, 'end')\n entry2.delete(0, 'end')\n entry3.delete(0, 'end')\n entry4.delete(0, 'end')\n search()\n\n\ndef getRow(event):\n item = trv.item(trv.focus())\n t_id.set(item['values'][0])\n t1.set(item['values'][1])\n t2.set(item['values'][2])\n t3.set(item['values'][3])\n t4.set(item['values'][4])\n\n\n# instantiating the class Tkinter\nroot = Tk()\n\n# instantiating the add contact window and pass the root like parameter into the instance\n\n\nroot.title(\"PhoneBook App\")\nroot.geometry(\"800x700\")\nroot.resizable(False, False)\n\n# instantiating menu\nmenubar = Menu(root)\n\n\ndef addContactWindow():\n add_contact_window = AddContactWindow(root)\n return add_contact_window\n\n\nroot.configure(menu=menubar)\n\nfile_menu = Menu(menubar, tearoff=1)\nfile_menu.add_command(label=\"Exit\", command=root.quit)\nmenubar.add_cascade(label=\"File\", menu=file_menu)\n\nq = StringVar()\nt_id = StringVar()\nt1 = StringVar()\nt2 = StringVar()\nt3 = 
StringVar()\nt4 = StringVar()\n\n\ndef aboutUsWindow():\n about_us_window = AboutUsWindow(root)\n return about_us_window\n\n\n# menu option about us\nabout_menu = Menu(menubar, tearoff=1)\nabout_menu.add_command(label=\"About us\", command=aboutUsWindow)\nmenubar.add_cascade(label=\"Help\", menu=about_menu)\n\n# defining 3 sections into the window\nwrapper1 = LabelFrame(root, text=\"Contact List\")\nwrapper2 = LabelFrame(root, text=\"Search\")\nwrapper3 = LabelFrame(root, text=\"Contact Data\")\n\nwrapper1.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\nwrapper2.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\nwrapper3.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\n# List section\ntrv = Treeview(wrapper1, columns=(1, 2, 3, 4, 5), show=\"headings\", height=6)\n\ntrv.pack(side=LEFT)\ntrv.place(x=0, y=0)\ntrv.heading(\"#0\", text=\"\")\ntrv.heading(\"#1\", text=\"Contact ID\")\ntrv.heading(\"#2\", text=\"First Name\")\ntrv.heading(\"#3\", text=\"Last Name\")\ntrv.heading(\"#4\", text=\"Email\")\ntrv.heading(\"#5\", text=\"Phone No.\")\n\ntrv.column('#0', width=50, minwidth=100)\ntrv.column('#1', width=150, minwidth=200)\ntrv.column('#2', width=150, minwidth=200)\ntrv.column('#3', width=150, minwidth=200)\ntrv.column('#4', width=150, minwidth=200)\ntrv.column('#5', width=150, minwidth=200)\ntrv.bind('', getRow)\n# vertical scrollbar\nyscrollbar = ttk.Scrollbar(wrapper1, orient='vertical', command=trv.yview)\nyscrollbar.pack(side=RIGHT, fill=\"y\")\n# horizontal scrollbar\nxscrollbar = ttk.Scrollbar(wrapper1, orient='horizontal', command=trv.xview)\nxscrollbar.pack(side=BOTTOM, fill='x')\ntrv.configure(yscrollcommand=yscrollbar.set, xscrollcommand=xscrollbar.set)\n\nquery = \"SELECT contact_id, first_name, last_name, email, phone_number FROM Contacts\"\ncursor.execute(query)\nrows = cursor.fetchall()\nupdateTreeview(rows)\n\n# Search Section\nlabel = Label(wrapper2, text=\"Search\")\nlabel.pack(side=tk.LEFT, padx=10)\n\n# label, entry and button search and clear\nsearch_entry = Entry(wrapper2, textvariable=q)\nsearch_entry.pack(side=tk.LEFT, padx=6)\nsearch_button = Button(wrapper2, text=\"Search\", command=search)\nsearch_button.pack(side=tk.LEFT, padx=6)\nclear_button = Button(wrapper2, text=\"Clear\", command=lambda: [search, clear_text()])\nclear_button.pack(side=tk.LEFT, padx=6)\n\n# labels and entries that belongs to wraper 3\nid_label = Label(wrapper3, text=\"Contact ID:\")\nid_label.grid(column=1, row=0, padx=5, pady=3)\nid_entry = Entry(wrapper3, textvariable=t_id)\nid_entry.grid(column=2, row=0, padx=5, pady=3)\n\nlabel1 = Label(wrapper3, text=\"First Name:\")\nlabel1.grid(column=1, row=1, padx=5, pady=3)\nentry1 = Entry(wrapper3, textvariable=t1)\nentry1.grid(column=2, row=1, padx=5, pady=3)\n\nlabel2 = Label(wrapper3, text=\"Last Name:\")\nlabel2.grid(column=1, row=2, padx=5, pady=3)\nentry2 = Entry(wrapper3, textvariable=t2)\nentry2.grid(column=2, row=2, padx=5, pady=3)\n\nlabel3 = Label(wrapper3, text=\"Email:\")\nlabel3.grid(column=1, row=3, padx=5, pady=3)\nentry3 = Entry(wrapper3, textvariable=t3)\nentry3.grid(column=2, row=3, padx=5, pady=3)\n\nlabel4 = Label(wrapper3, text=\"Phone #:\")\nlabel4.grid(column=1, row=4, padx=5, pady=3)\nentry4 = Entry(wrapper3, textvariable=t4)\nentry4.grid(column=2, row=4, padx=5, pady=3)\n\n\ndef updateContact():\n firts = t1.get()\n last = t2.get()\n email = t3.get()\n phone = t4.get()\n id = t_id.get()\n print(id)\n update_query = \"UPDATE Contacts SET First_name = ?, Last_Name=?, email=?, phone_number=? 
WHERE contact_id = ?\"\n if messagebox.askyesno(\"Confirm\", \"Are you sure you want to update the record?\"):\n cursor.execute(update_query, (firts, last, email, phone, id))\n db.commit()\n clear()\n clear_text()\n else:\n return True\n\n\n# button action update contact\nupdate_button = Button(wrapper3, text=\"Update\", command=updateContact)\nupdate_button.grid(column=3, row=4, padx=5, pady=3)\n\n\ndef addContact():\n firts = t1.get()\n last = t2.get()\n email = t3.get()\n phone = t4.get()\n insert_query = \"INSERT INTO Contacts(first_name, last_name, email, phone_number) VALUES(?,?,?,?)\"\n cursor.execute(insert_query, (firts, last, email, phone))\n # commit so the new contact is actually written to the database file\n db.commit()\n clear()\n clear_text()\n\n\n# button action add or insert a new contact\nadd_button = Button(wrapper3, text=\"Add\", command=addContact)\nadd_button.grid(column=4, row=4, padx=5, pady=3)\n\n\ndef deleteContact():\n # parameterised to avoid injecting the entry text into the SQL\n delete_query = \"DELETE FROM Contacts WHERE contact_id = ?\"\n if messagebox.askyesno(\"Confirm delete\", \"Are you sure you want to delete this record?\"):\n cursor.execute(delete_query, (t_id.get(),))\n clear()\n clear_text()\n db.commit()\n else:\n return True\n\n\n# button action delete\ndelete_button = Button(wrapper3, text=\"Delete\", command=deleteContact)\ndelete_button.grid(column=5, row=4, padx=5, pady=3)\n\nroot.mainloop()\n","sub_path":"Views/mainGui.py","file_name":"mainGui.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"598613660","text":"import matplotlib.pyplot as plt\n\n\ndef graphplt(x_value1,x_value2,x_value3,x_value4, fig_main, labelX, labelY):\n plt.figure(figsize=[15, 15],num=fig_main)\n plt.plot(x_value1,color='olive',label = \"GCN\")\n plt.plot(x_value2, color='red', label=\"GCN+CNN\")\n plt.plot(x_value3, color='blue', label=\"CNN\")\n plt.plot(x_value4, color='indigo', label=\"GCN+RNN\")\n plt.xlabel(labelX, fontsize=14)\n plt.ylabel(labelY, fontsize=14)\n plt.legend(loc='best')\n plt.savefig(fig_main+\".png\")\n plt.show()\n","sub_path":"Hybrid/graphplt.py","file_name":"graphplt.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423854793","text":"load(\"//android:android_proto_compile.bzl\", \"android_proto_compile\")\nload(\"@build_bazel_rules_android//android:rules.bzl\", \"android_library\")\n\ndef android_proto_library(**kwargs):\n # Compile protos\n name_pb = kwargs.get(\"name\") + \"_pb\"\n android_proto_compile(\n name = name_pb,\n **{k: v for (k, v) in kwargs.items() if k in (\"deps\", \"verbose\")} # Forward args\n )\n\n # Create android library\n android_library(\n name = kwargs.get(\"name\"),\n srcs = [name_pb],\n deps = PROTO_DEPS,\n exports = PROTO_DEPS,\n visibility = kwargs.get(\"visibility\"),\n )\n\nPROTO_DEPS = [\n \"@com_google_guava_guava_android//jar\",\n \"@com_google_protobuf//:protobuf_javalite\",\n \"@javax_annotation_javax_annotation_api//jar\"\n]\n","sub_path":"android/android_proto_library.bzl","file_name":"android_proto_library.bzl","file_ext":"bzl","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"624776481","text":"from datetime import date, time, timedelta\nfrom decimal import Decimal\nimport random\n\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom .models import Person\nfrom .fixtures import create_fixtures\n\n\nclass BulkUpdateTests(TestCase):\n\n 
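# Shared pattern for the tests below: setUp creates the fixture rows; each test\n # mutates one field type on every Person, calls bulk_update, then re-queries\n # and asserts that the new values were persisted.\n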
def setUp(self):\n self.now = timezone.now().replace(microsecond=0) # mysql doesn't do microseconds. # NOQA\n self.date = date(2015, 3, 28)\n self.time = time(13, 0)\n create_fixtures()\n\n def test_big_integer_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.big_age = idx + 27\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.big_age, idx + 27)\n\n def test_comma_separated_integer_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.comma_separated_age = str(idx) + ',27'\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.comma_separated_age, str(idx) + ',27')\n\n def test_integer_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.age = idx + 27\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.age, idx + 27)\n\n def test_positive_integer_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.positive_age = idx + 27\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.positive_age, idx + 27)\n\n def test_positive_small_integer_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.positive_small_age = idx + 27\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.positive_small_age, idx + 27)\n\n def test_small_integer_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.small_age = idx + 27\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.small_age, idx + 27)\n\n def test_boolean_field(self):\n vals = [True, False, True]\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.certified = vals[idx % 3]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.certified, vals[idx % 3])\n\n def test_null_boolean_field(self):\n \"\"\"\n Null-boolean values are saved correctly\n \"\"\"\n\n vals = [True, False, None]\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.null_certified = vals[idx % 3]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.null_certified, vals[idx % 3])\n\n def test_char_field(self):\n people = Person.objects.order_by('pk').all()\n\n # change names with bulk update\n names = ['Walter', 'The Dude', 'Donny', 'Jesus', 'Buddha', 'Clark']\n for idx, person in enumerate(people):\n person.name = names[idx]\n Person.objects.bulk_update(people)\n\n # check that names are set\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.name, names[idx])\n\n def test_email_field(self):\n \"\"\"\n Email values are saved correctly\n 
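(checked by pushing new addresses through bulk_update and re-reading them)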
\"\"\"\n\n emails = ['walter@mailinator.com', 'thedude@mailinator.com',\n 'donny@mailinator.com', 'jesus@mailinator.com',\n 'buddha@mailinator.com', 'clark@mailinator.com']\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.email = emails[idx]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.email, emails[idx])\n\n def test_file_path_field(self):\n file_paths = ['/home/dummy.txt', '/Downloads/kitten.jpg',\n '/Users/user/fixtures.json', 'dummy.png',\n 'users.json', '/home/dummy.png']\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.file_path = file_paths[idx]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.file_path, file_paths[idx])\n\n def test_slug_field(self):\n people = Person.objects.order_by('pk').all()\n\n slugs = ['jesus', 'buddha', 'clark', 'the-dude', 'donny', 'walter']\n for idx, person in enumerate(people):\n person.slug = slugs[idx]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.slug, slugs[idx])\n\n def test_text_field(self):\n people = Person.objects.order_by('pk').all()\n\n texts = ['this is a dummy text', 'dummy text', 'bla bla bla bla bla',\n 'here is a dummy text', 'dummy', 'bla bla bla']\n for idx, person in enumerate(people):\n person.text = texts[idx]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.text, texts[idx])\n\n def test_url_field(self):\n people = Person.objects.order_by('pk').all()\n urls = ['docs.djangoproject.com', 'news.ycombinator.com',\n 'https://docs.djangoproject.com', 'https://google.com',\n 'google.com', 'news.ycombinator.com']\n for idx, person in enumerate(people):\n person.url = urls[idx]\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.url, urls[idx])\n\n def test_date_time_field(self):\n \"\"\"\n Datetime values are saved correctly\n \"\"\"\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.date_time = self.now - timedelta(days=1 + idx,\n hours=1 + idx)\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.date_time,\n self.now - timedelta(days=1 + idx, hours=1 + idx))\n\n def test_date_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.date = self.date - timedelta(days=1 + idx)\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.date,\n self.date - timedelta(days=1 + idx))\n\n def test_time_field(self):\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.time = time(1 + idx, 0 + idx)\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.time, time(1 + idx, 0 + idx))\n\n def test_decimal_field(self):\n \"\"\"\n Decimal values are saved correctly\n \"\"\"\n\n people = Person.objects.order_by('pk').all()\n for 
idx, person in enumerate(people):\n person.height = Decimal('1.%s' % (50 + idx * 7))\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.height, Decimal('1.%s' % (50 + idx * 7)))\n\n def test_float_field(self):\n people = Person.objects.order_by('pk').all()\n initial_values = {p.pk: p.float_height for p in people}\n for idx, person in enumerate(people):\n person.float_height = person.float_height * 2\n initial_values[person.pk] = person.float_height\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.float_height, initial_values[person.pk])\n\n def test_generic_ipaddress_field(self):\n people = Person.objects.order_by('pk').all()\n remote_addrs = ['127.0.0.1', '192.0.2.30', '2a02:42fe::4']\n values = {}\n for idx, person in enumerate(people):\n remote_addr = random.choice(remote_addrs)\n person.remote_addr = remote_addr\n values[person.pk] = remote_addr\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.remote_addr, values[person.pk])\n\n def test_file_field(self):\n files = ['dummy.txt', 'kitten.jpg', 'fixtures.json',\n 'dummy.png', 'users.json', 'dummy.png']\n people = Person.objects.order_by('pk').all()\n values = {}\n for idx, person in enumerate(people):\n my_file = random.choice(files)\n person.my_file = my_file\n values[person.pk] = my_file\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.my_file, values[person.pk])\n\n def test_image_field(self):\n images = ['kitten.jpg', 'dummy.png', 'users.json', 'dummy.png']\n people = Person.objects.order_by('pk').all()\n values = {}\n for idx, person in enumerate(people):\n image = random.choice(images)\n person.image = image\n values[person.pk] = image\n Person.objects.bulk_update(people)\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.image, values[person.pk])\n\n def test_custom_fields(self):\n values = {}\n people = Person.objects.all()\n people_dict = {p.name: p for p in people}\n person = people_dict['Mike']\n person.data = {'name': 'mikey', 'age': 99, 'ex': -99}\n values[person.pk] = {'name': 'mikey', 'age': 99, 'ex': -99}\n\n person = people_dict['Mary']\n person.data = {'names': {'name': []}}\n values[person.pk] = {'names': {'name': []}}\n\n person = people_dict['Pete']\n person.data = []\n values[person.pk] = []\n\n person = people_dict['Sandra']\n person.data = [{'name': 'Pete'}, {'name': 'Mike'}]\n values[person.pk] = [{'name': 'Pete'}, {'name': 'Mike'}]\n\n person = people_dict['Ash']\n person.data = {'text': 'bla'}\n values[person.pk] = {'text': 'bla'}\n\n person = people_dict['Crystal']\n values[person.pk] = person.data\n\n Person.objects.bulk_update(people)\n\n people = Person.objects.all()\n for person in people:\n self.assertEqual(person.data, values[person.pk])\n\n def test_update_fields(self):\n \"\"\"\n Only the fields in \"update_fields\" are updated\n \"\"\"\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.age += 1\n person.height += Decimal('0.01')\n Person.objects.bulk_update(people, update_fields=['age'])\n\n people2 = Person.objects.order_by('pk').all()\n for person1, person2 in zip(people, people2):\n 
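# person1 carries the in-memory edits, person2 is the row re-read from the DB:\n # 'age' was in update_fields so it must match; 'height' was excluded, so the\n # unchanged stored value must differ from the in-memory one.\n 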
self.assertEqual(person1.age, person2.age)\n self.assertNotEqual(person1.height, person2.height)\n\n def test_exclude_fields(self):\n \"\"\"\n Only the fields not in \"exclude_fields\" are updated\n \"\"\"\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.age += 1\n person.height += Decimal('0.01')\n Person.objects.bulk_update(people, exclude_fields=['age'])\n\n people2 = Person.objects.order_by('pk').all()\n for person1, person2 in zip(people, people2):\n self.assertNotEqual(person1.age, person2.age)\n self.assertEqual(person1.height, person2.height)\n\n def test_object_list(self):\n \"\"\"\n Pass in a list instead of a queryset for bulk updating\n \"\"\"\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n person.big_age = idx + 27\n Person.objects.bulk_update(list(people))\n\n people = Person.objects.order_by('pk').all()\n for idx, person in enumerate(people):\n self.assertEqual(person.big_age, idx + 27)\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":13881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"5961689","text":"\nfrom prac_06.guitar import Guitar\n\n\ndef main():\n print(\"My guitars!\")\n guitars = get_guitars_list()\n\n max_name_length = max(len(guitar.name) for guitar in guitars)\n\n print(\"\\nThese are my guitars:\")\n for i, guitar in enumerate(guitars):\n print(\"Guitar {}: {:>{}} ({}), worth $ {:10,.2f}{}\".format(i + 1, guitar.name, max_name_length, guitar.year,\n guitar.cost,\n \" (vintage)\" if guitar.is_vintage() else \"\"))\n\n\ndef get_guitars_list():\n entering_guitars = True\n guitars = []\n while entering_guitars:\n guitar_name = input(\"Name: \")\n if guitar_name == \"\":\n entering_guitars = False\n else:\n guitar_year = get_year()\n guitar_cost = get_cost()\n guitar = Guitar(guitar_name, guitar_year, guitar_cost)\n guitars.append(guitar)\n print(\"{} added.\".format(guitar))\n return guitars\n\n\ndef get_year():\n # loop until the try block returns a valid number\n while True:\n try:\n return int(input(\"Year: \"))\n except ValueError:\n print(\"Invalid entry, must be a number\")\n\n\ndef get_cost():\n while True:\n try:\n return float(input(\"Cost: $ \"))\n except ValueError:\n print(\"Invalid entry, must be a number\")\n\n\nmain()\n","sub_path":"prac_06/guitars.py","file_name":"guitars.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"238948475","text":"\n\nfrom xai.brain.wordbase.verbs._spay import _SPAY\n\n# class header\nclass _SPAYS(_SPAY, ):\n\tdef __init__(self,): \n\t\t_SPAY.__init__(self)\n\t\tself.name = \"SPAYS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"spay\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_spays.py","file_name":"_spays.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"586687811","text":"def intersection(arrays):\n \"\"\"\n YOUR CODE HERE\n \"\"\"\n # Your code here\n # O(T) time and space, where T is the total number of values across all arrays\n\n cache = dict()\n\n for arr in arrays:\n for num in arr:\n if num in cache:\n cache[num] += 1\n else:\n cache[num] = 1\n # With the counts in place, any number whose count equals the number of input\n # arrays appeared in every one of them, so keep exactly those keys.\n result = [count[0] for count in cache.items() if count[1] == len(arrays)]\n\n 
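# Note: this assumes values are distinct within each individual array; duplicates\n # inside a single array could push a count up to len(arrays) without the value\n # actually appearing in every array.\n 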
return result\n\n\nif __name__ == \"__main__\":\n arrays = []\n\n arrays.append(list(range(1000000, 2000000)) + [1, 2, 3])\n arrays.append(list(range(2000000, 3000000)) + [1, 2, 3])\n arrays.append(list(range(3000000, 4000000)) + [1, 2, 3])\n\n print(intersection(arrays))\n","sub_path":"hashtables/ex3/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112612887","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 20 13:26:05 2018\r\n\r\n@author: Sam\r\n\"\"\"\r\n\r\n#Populating the namespace with libraries\r\n\r\n\r\nimport os\r\nimport json\r\nimport sys\r\nimport csv\r\ntry:\r\n import urllib2 as urllib\r\nexcept ImportError:\r\n import urllib.request as urllib\r\n\r\n#The first system argument is the API key and the second is the bus line and the\r\n#third is the csv \r\n\r\nAPI_key = sys.argv[1]\r\nbus_line = sys.argv[2]\r\nbus_csv = sys.argv[3]\r\n\r\n\r\n#Set url to pull the data\r\n\r\nurl = \"http://bustime.mta.info/api/siri/vehicle-monitoring.json?key=\" +\\\r\nAPI_key + \"&VehicleMonitoringDetailLevel=calls&LineRef=\" + bus_line\r\n\r\n#Read in the data and store as a JSON\r\n\r\nresponse = urllib.urlopen(url)\r\ndata = response.read().decode('utf-8')\r\ndata = json.loads(data)\r\n\r\n#Locate latitude and longitude data within the JSON file\r\n\r\ndata = data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery'][0]['VehicleActivity']\r\n\r\n#Initialize num_active_buses as the number of buses referenced in the data JSON\r\n\r\nnum_active_buses = len(data)\r\n\r\n#Create lists to wihch attribute data will be appended for each bus\r\n\r\nlat_list = []\r\nlng_list = []\r\nstp_list = []\r\nstp_stat = []\r\n\r\n#Iterate through buses and append lat, lng, stop name, and stop status to lists\r\n\r\nfor i in range(num_active_buses):\r\n location = data[i]['MonitoredVehicleJourney']['VehicleLocation']\r\n lng, lat = str(location['Longitude']), str(location['Latitude'])\r\n \r\n try:\r\n nxt_stop = data[i]['MonitoredVehicleJourney']['OnwardCalls']['OnwardCall'][0][\\\r\n 'StopPointName']\r\n \r\n stop_status = data[i]['MonitoredVehicleJourney']['OnwardCalls']\\\r\n ['OnwardCall'][0]['Extensions']['Distances']['PresentableDistance']\r\n except KeyError:\r\n nxt_stop = \"N/A\"\r\n stop_status = \"N/A\"\r\n \r\n lat_list.append(lat)\r\n lng_list.append(lng)\r\n stp_list.append(nxt_stop)\r\n stp_stat.append(stop_status)\r\n \r\n \r\n#Create list of attribute names, and write our bus data to the csv\r\n\r\n\r\nfield_names = ['Latitude', 'Longitude', 'Stop Name', 'Stop Status']\r\nwith open(bus_csv, 'w', newline = '') as csvfile:\r\n writer = csv.DictWriter(csvfile, field_names)\r\n writer.writeheader()\r\n for i in range(num_active_buses):\r\n writer.writerow({'Latitude':lat_list[i], \\\r\n 'Longitude':lng_list[i],\\\r\n 'Stop Name':stp_list[i],\\\r\n 'Stop Status':stp_stat[i]})\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n","sub_path":"HW3_sm4372/HW3_4_sm4372.py","file_name":"HW3_4_sm4372.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"302589683","text":"from __future__ import print_function\n \nfrom setuptools import setup, find_packages, Command\nfrom setuptools.command.test import test as TestCommand\n\nimport os\nimport sys\nimport urllib\n\nimport chroniker\n\nCURRENT_DIR = os.path.abspath(os.path.dirname(__file__))\n\ndef get_reqs(*fns):\n lst = []\n for fn 
in fns:\n for package in open(os.path.join(CURRENT_DIR, fn)).readlines():\n package = package.strip()\n if not package:\n continue\n lst.append(package.strip())\n return lst\n\nsetup(\n name = \"django-chroniker\",\n version = chroniker.__version__,\n packages = find_packages(),\n scripts = ['bin/chroniker'],\n package_data = {\n '': ['docs/*.txt', 'docs/*.py'],\n 'chroniker': [\n 'static/*/*/*.*',\n 'templates/*.*',\n 'templates/*/*.*',\n 'templates/*/*/*.*',\n 'templates/*/*/*/*.*',\n 'fixtures/*',\n 'tests/fixtures/*',\n ],\n },\n author = \"Chris Spencer\",\n author_email = \"chrisspen@gmail.com\",\n description = \"Easily control cron jobs via Django's admin.\",\n license = \"BSD\",\n url = \"https://github.com/chrisspen/django-chroniker\",\n #https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers = [\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.0',\n 'Programming Language :: Python :: 3.4',\n 'Framework :: Django',\n ],\n zip_safe = False,\n install_requires=get_reqs('pip-requirements-min-django.txt', 'pip-requirements.txt'),\n tests_require=get_reqs('pip-requirements-test.txt'),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"333489556","text":"\"\"\"\r\n将城市与城市代码写入stations.json便于读取\r\n\"\"\"\r\n\r\nimport requests\r\nimport re\r\nimport json\r\n\r\ndef get_stations():\r\n requests.packages.urllib3.disable_warnings()\r\n url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js?station_version=1.9018'\r\n response = requests.get(url,verify=False)\r\n pattern = '([\\u4e00-\\u9fa5]+)\\|([A-Z]+)'\r\n stations = re.findall(pattern, response.text)\r\n return dict(stations)\r\n\r\nwith open('stations.json', 'w') as f:\r\n json.dump(get_stations(), f)","sub_path":"12306/stations.py","file_name":"stations.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"263964204","text":"from MorcoWrapper2D.mw2_Scene import *\nfrom MorcoWrapper2D.mw2_Rect import *\nfrom MorcoWrapper2D.mw2_Image import *\nfrom MorcoWrapper2D.mw2_Text import *\nfrom ProjectPong.EndManager import *\n\n\nclass EndScene (mw2_Scene):\n\tdef __init__(self):\n\t\tmw2_Scene.__init__(self)\n\n\tdef Build(self):\n\t# background\n\t\tbackground = mw2_Image(\"background\", \"../__Resources/Pong/background.png\")\n\t\tbackground.mSize = mw2_Vector2(800, 600)\n\t\tself.AddObject(background)\n\n\t# top\n\t\ttop = mw2_GameObject(\"top\")\n\t\ttop.WorldPosition( mw2_Vector2(0, -250) )\n\t\tself.AddObject(top)\n\n\t\ttext_gameOver = mw2_Text(\n\t\t\t\"text_gameOver\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t50,\n\t\t\t\"Game Over\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.CENTER\n\t\t)\n\t\ttext_gameOver.AttachTo(top)\n\t\ttext_gameOver.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(text_gameOver)\n\n\t\twinner = mw2_Text(\n\t\t\t\"winner\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"...\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.CENTER\n\t\t)\n\t\twinner.AttachTo(top)\n\t\twinner.LocalPosition( mw2_Vector2(0, 60) 
)\n\t\tself.AddObject(winner)\n\n\t# middle 1\n\t\tmiddle1 = mw2_GameObject(\"middle1\")\n\t\tmiddle1.WorldPosition( mw2_Vector2(0, -120) )\n\t\tself.AddObject(middle1)\n\n\t\tleftSide = mw2_GameObject(\"leftSide\")\n\t\tleftSide.AttachTo(middle1)\n\t\tleftSide.LocalPosition( mw2_Vector2(-320, 0) )\n\t\tself.AddObject(leftSide)\n\n\t\trightSide = mw2_GameObject(\"rightSide\")\n\t\trightSide.AttachTo(middle1)\n\t\trightSide.LocalPosition( mw2_Vector2(-100, 0) )\n\t\tself.AddObject(rightSide)\n\n\t# left side\n\t\ttext_title = mw2_Text(\n\t\t\t\"text_title\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t32,\n\t\t\t\"Credits\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_title.AttachTo(leftSide)\n\t\ttext_title.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(text_title)\n\n\t\ttext_item1 = mw2_Text(\n\t\t\t\"text_item1\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"Background\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_item1.AttachTo(leftSide)\n\t\ttext_item1.LocalPosition( mw2_Vector2(0, 40) )\n\t\tself.AddObject(text_item1)\n\n\t\ttext_item2 = mw2_Text(\n\t\t\t\"text_item2\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"Sounds\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_item2.AttachTo(leftSide)\n\t\ttext_item2.LocalPosition( mw2_Vector2(0, 65) )\n\t\tself.AddObject(text_item2)\n\n\t\ttext_item3 = mw2_Text(\n\t\t\t\"text_item3\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"Music\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_item3.AttachTo(leftSide)\n\t\ttext_item3.LocalPosition( mw2_Vector2(0, 90) )\n\t\tself.AddObject(text_item3)\n\n\t\ttext_item4 = mw2_Text(\n\t\t\t\"text_item4\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"Icons\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_item4.AttachTo(leftSide)\n\t\ttext_item4.LocalPosition( mw2_Vector2(0, 115) )\n\t\tself.AddObject(text_item4)\n\n\t\ttext_item5 = mw2_Text(\n\t\t\t\"text_item5\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"Font\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_item5.AttachTo(leftSide)\n\t\ttext_item5.LocalPosition( mw2_Vector2(0, 140) )\n\t\tself.AddObject(text_item5)\n\n\t# right side\n\t\ttext_url1 = mw2_Text(\n\t\t\t\"text_url1\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"http://subtlepatterns.com\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_url1.AttachTo(rightSide)\n\t\ttext_url1.LocalPosition( mw2_Vector2(0, 40) )\n\t\tself.AddObject(text_url1)\n\n\t\ttext_url2 = mw2_Text(\n\t\t\t\"text_url2\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"http://www.freesound.org\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_url2.AttachTo(rightSide)\n\t\ttext_url2.LocalPosition( mw2_Vector2(0, 65) )\n\t\tself.AddObject(text_url2)\n\n\t\ttext_url3 = mw2_Text(\n\t\t\t\"text_url3\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"https://soundcloud.com\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_url3.AttachTo(rightSide)\n\t\ttext_url3.LocalPosition( mw2_Vector2(0, 90) )\n\t\tself.AddObject(text_url3)\n\n\t\ttext_url4 = 
mw2_Text(\n\t\t\t\"text_url4\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"http://findicons.com\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_url4.AttachTo(rightSide)\n\t\ttext_url4.LocalPosition( mw2_Vector2(0, 115) )\n\t\tself.AddObject(text_url4)\n\n\t\ttext_url5 = mw2_Text(\n\t\t\t\"text_url5\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"http://www.dafont.com\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_url5.AttachTo(rightSide)\n\t\ttext_url5.LocalPosition( mw2_Vector2(0, 140) )\n\t\tself.AddObject(text_url5)\n\n\t# middle 2\n\t\tmiddle2 = mw2_GameObject(\"middle2\")\n\t\tmiddle2.WorldPosition( mw2_Vector2(-320, 60) )\n\t\tself.AddObject(middle2)\n\n\t\ttext_line1 = mw2_Text(\n\t\t\t\"text_line1\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"Game designed and developed by Morco.\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_line1.AttachTo(middle2)\n\t\ttext_line1.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(text_line1)\n\n\t\ttext_line2 = mw2_Text(\n\t\t\t\"text_line2\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"It is marked as open-source software.\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_line2.AttachTo(middle2)\n\t\ttext_line2.LocalPosition( mw2_Vector2(0, 30) )\n\t\tself.AddObject(text_line2)\n\n\t\ttext_line3 = mw2_Text(\n\t\t\t\"text_line3\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t25,\n\t\t\t\"More details on www.morco.ro\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.LEFT\n\t\t)\n\t\ttext_line3.AttachTo(middle2)\n\t\ttext_line3.LocalPosition( mw2_Vector2(0, 60) )\n\t\tself.AddObject(text_line3)\n\n\t# bottom\n\t\tbottom = mw2_GameObject(\"bottom\")\n\t\tbottom.WorldPosition( mw2_Vector2(330, 220) )\n\t\tself.AddObject(bottom)\n\n\t\ttext_restart = mw2_Text(\n\t\t\t\"text_restart\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t20,\n\t\t\t\"Press \\\"R\\\" to restart game.\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.RIGHT\n\t\t)\n\t\ttext_restart.AttachTo(bottom)\n\t\ttext_restart.LocalPosition( mw2_Vector2(0, 0) )\n\t\tself.AddObject(text_restart)\n\n\t\ttext_exit = mw2_Text(\n\t\t\t\"text_exit\",\n\t\t\t\"../__Resources/Pong/Ericsson-GA628.ttf\",\n\t\t\t20,\n\t\t\t\"Press \\\"ESC\\\" to exit.\",\n\t\t\tmw2_Vector4(210, 184, 137, 255),\n\t\t\tmw2_Text.Align.RIGHT\n\t\t)\n\t\ttext_exit.AttachTo(bottom)\n\t\ttext_exit.LocalPosition( mw2_Vector2(0, 30) )\n\t\tself.AddObject(text_exit)\n","sub_path":"full_project/2D/3. PyGame [Python2]/ProjectPong/EndScene.py","file_name":"EndScene.py","file_ext":"py","file_size_in_byte":6481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"100236582","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# michael a.g. 
aïvázis \n# (c) 1998-2023 all rights reserved\n\n\ndef test():\n    \"\"\"\n    Exercise the common use case with a cpu timer\n    \"\"\"\n    # access the timer bindings\n    from pyre.extensions.pyre.timers import ProcessTimer\n\n    # make a timer\n    t = ProcessTimer(name=\"tests.timer\")\n    # start it\n    t.start()\n\n    # do something\n    s = sum(range(10**6))\n\n    # stop the timer\n    t.stop()\n\n    # get the journal\n    import journal\n    # make a channel\n    channel = journal.debug(name=\"pyre.timers\")\n    # activate it\n    # channel.activate()\n    # log the elapsed time\n    channel.log(f\"elapsed: {t.ms()}ms\")\n\n    # all done\n    return\n\n\n# main\nif __name__ == \"__main__\":\n    # run the test\n    test()\n\n\n# end of file\n","sub_path":"tests/pyre.ext/timers/process_timer_example.py","file_name":"process_timer_example.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51497659","text":"import json\n\nfrom solc import compile_source\nfrom web3 import Web3\n\nfrom framework.utils.Ethereum.Web3Handler import Web3Handler\n\n\nclass ContractHandler(Web3Handler):\n    def __init__(self, address=None, abi_path=None):\n        super().__init__()\n        # Load contract configuration\n        with open(abi_path) as contract_definition:\n            contract_json = json.load(contract_definition)\n            abi = contract_json['abi']\n            # self.abi = json.load(contract_definition)\n\n        self.address = self.web3.toChecksumAddress(address)\n        self.contract = self.web3.eth.contract(address=self.address, abi=abi)\n\n    def getBalance(self):\n        wei_balance = self.web3.eth.getBalance(self.address)\n        ether_balance = Web3.fromWei(wei_balance, unit='ether')\n        return ether_balance\n\n    @staticmethod\n    def compile_solidity(name, source):\n        contract_source_code = source\n        compiled_sol = compile_source(contract_source_code)\n        # SuperSimpleSale\n        contract_interface = compiled_sol[':{}'.format(name)]\n\n        abi = contract_interface['abi']\n        bytecode = contract_interface['bin']\n\n        return abi, bytecode\n","sub_path":"framework/utils/Ethereum/ContractHandler.py","file_name":"ContractHandler.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"165944832","text":"#coding=utf-8\n\nimport unittest\n\n\"\"\"\n\nData Stream Median ( -- leetcode : 295. Find Median from Data Stream)\n\nNumbers keep coming, return the median of the numbers each time a new number is added.\n\nHave you met this question in a real interview? Yes\nClarification\nWhat's the definition of Median?\n- Median is the number that is in the middle of a sorted array. If there are n numbers in a sorted array A, the median is\nA[(n - 1) / 2]. For example, if A=[1,2,3], median is 2. 
If A=[1,19], median is 1.\n\nExample\nFor numbers coming list: [1, 2, 3, 4, 5], return [1, 1, 2, 2, 3].\n\nFor numbers coming list: [4, 5, 1, 3, 2, 6, 0], return [4, 4, 4, 3, 3, 3, 3].\n\nFor numbers coming list: [2, 20, 100], return [2, 2, 20].\n\nChallenge\nTotal run time in O(nlogn).\n\nTags\nLintCode Copyright Heap Priority Queue Google\nRelated Problems\nHard Sliding Window Median 20 %\nEasy Median 23 %\nHard Median of two Sorted Arrays\n\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param nums: A list of integers.\n @return: The median of numbers\n \"\"\"\n def medianII(self, nums):\n # write your code here\n if not nums:\n return []\n import heapq\n result = []\n min_heap = []\n max_heap = []\n odd = False\n for num in nums:\n odd = not odd\n if odd: # also need to check here\n if min_heap:\n tmp = min_heap[0]\n if num > tmp:\n heapq.heappop(min_heap)\n heapq.heappush(max_heap, -tmp)\n heapq.heappush(min_heap, num)\n else:\n heapq.heappush(max_heap, -num)\n else:\n heapq.heappush(max_heap, -num)\n else:\n tmp = - max_heap[0]\n if tmp > num:\n heapq.heappop(max_heap)\n heapq.heappush(max_heap, -num)\n heapq.heappush(min_heap, tmp)\n else:\n heapq.heappush(min_heap, num)\n result.append(-max_heap[0])\n return result\n\n\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self):\n nums = [1, 2, 3, 4, 5]\n answer = [1, 1, 2, 2, 3]\n result = self.sol.medianII(nums)\n self.assertEqual(answer, result)\n\n def test_case2(self):\n nums = [4, 5, 1, 3, 2, 6, 0]\n answer = [4, 4, 4, 3, 3, 3, 3]\n result = self.sol.medianII(nums)\n self.assertEqual(answer, result)\n\n def test_case3(self):\n nums = [2, 20, 100]\n answer = [2, 2, 20]\n result = self.sol.medianII(nums)\n self.assertEqual(answer, result)\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n#-*- coding:utf-8 -*-\n","sub_path":"mjbeto/data_stream_median.py","file_name":"data_stream_median.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"135689267","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Daily\nfrom django.utils import timezone\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User\n\n\ndef index(request):\n return render(request, 'daily/index.html', {})\n\n@login_required\ndef show_entries(request):\n latest_daily_list = Daily.objects.order_by('-date')[:10]\n context = {'daily_list': latest_daily_list}\n return render(request, 'daily/entries.html', context)\n\n@login_required\ndef create_article(request):\n if request.method == 'POST':\n d = Daily(title=request.POST['title'], text=request.POST['text'], date=timezone.now(),\n user_id=request.user.id, user_name=request.user.username)\n d.save()\n return HttpResponseRedirect(reverse('result'))\n else:\n return render(request, 'daily/post.html', {})\n\n@login_required\ndef delete(request, daily_id):\n daily = get_object_or_404(Daily, pk=daily_id)\n if request.user.id != daily.user_id:\n return render(request, 'daily/detail.html', {'daily': daily, 'error_message': '削除する権限がありません。'})\n if request.method == 'POST':\n daily.delete()\n return HttpResponseRedirect(reverse('result'))\n else:\n return 
render(request, 'daily/delete.html', {'daily': daily})\n\n@login_required\ndef edit(request, daily_id):\n    daily = get_object_or_404(Daily, pk=daily_id)\n    if request.user.id != daily.user_id:\n        return render(request, 'daily/detail.html', {'daily': daily, 'error_message': '編集する権限がありません'})\n    if request.method == 'POST':\n        daily.title = request.POST['title']\n        daily.text = request.POST['text']\n        daily.date = timezone.now()\n        daily.save()\n        return HttpResponseRedirect(reverse('result'))\n    else:\n        return render(request, 'daily/edit.html', {'daily': daily})\n\n@login_required\ndef result(request):\n    return render(request, 'daily/result.html', {})\n\n@login_required\ndef detail(request, daily_id):\n    daily = get_object_or_404(Daily, pk=daily_id)\n    if request.method == 'POST':\n        daily.comment_set.create(text=request.POST['comment'], pub_date=timezone.now(),\n                                 user_name=request.user.username, user_id=request.user.id)\n        daily.save()\n        return render(request, 'daily/detail.html', {'daily': daily})\n    else:\n        return render(request, 'daily/detail.html', {'daily': daily})\n\n@login_required\ndef search(request):\n    if request.method == 'POST':\n        keyword = request.POST['keyword']\n        daily_list = Daily.objects.filter(Q(title__contains=keyword) |\n                                          Q(text__contains=keyword) |\n                                          Q(user_name__contains=keyword))[:5]\n        context = {'daily_list': daily_list}\n        return render(request, 'daily/entries.html', context)\n    else:\n        return render(request, 'daily/search.html', {})\n\n@login_required\ndef show_users(request):\n    users = User.objects.all()\n    context = {'users': users}\n    return render(request, 'daily/users.html', context)\n\n@login_required\ndef show_user_page(request, user_id):\n    daily_list = Daily.objects.filter(user_id=user_id).order_by('-date')[:10]\n    user = User.objects.get(id=user_id)\n    context = {'daily_list': daily_list, 'userx': user}\n    return render(request, 'daily/user_profile.html', context)","sub_path":"daily/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"170538705","text":"from os import system as st\nst('cls')\n\nst('cls')\nlanche = []\nlanche.append('suco') # o método append adiciona um elemento no final\nlanche.append('pizza')\nlanche.append('hanburguer')\nlanche.append('pudim')\nlanche[2] = 'refi' # o hamburguer será substituido pelo refri\nprint(lanche)\nlanche = []\n# inserir elementos nas posições desejadas\nlanche.insert(0, 'refri')\nprint(lanche)\n\ncomida = []\ncomida.append(str(input('Digite uma comida: ')))\nfor c in comida: \n    print(c)\n\n# remover elementos\n\n# remove um elemento por item\n# del lanche[3]\n\n# remove elemento no final ou a onde quiser\n# lanche.pop()\n# lanche.pop(0)\n\n# remove um elemento por conteúdo\nif 'pizza' in lanche:\n    pass\n    lanche.remove('pizza')\nelse:\n    print('não foi encontrada a pizza')\nfrom random import *\nlista = list(range(randint(0, 6)))\nfor l in lista:\n    print(l)\n\n\nvalores = []\nvalores.append(1)\nvalores.append(3)\nvalores.append(10)\nvalores.append(6)\n\n# ordem reversa de números\nvalores.sort(reverse=True)\n\nprint(valores)\n# números ordenados\nvalores.sort()\nprint(f'Os valores ordenados são: {valores}')\nprint(f'O tamanho da lista de valores é: {len(valores)}')\n\n# remover um número na lista (valores)\nif 4 in valores:\n    valores.remove(4)\n    print(f'A nova lista de valores são: {valores}')\nelif 3 in valores:\n    valores.remove(3)\n    print(f'A nova lista de valores são: {valores}')\nelse:\n    print('O 
número 4 não foi encontrado')\n\nval = list()\nval.append(3)\nval.append(4)\nval.append(5)\nval.append(10)\n\nprint('A nova lista é:', end=' ')\nfor v in val:\n    print(v, end=' ')\n# for v in val:\n#     print(v, end=' ')\n\nfor pos, v in enumerate(val):\n    print(f'\\nO valor {v} ficou na {pos} posição')\n\n\nval = list()\nfor c in range(1, 10+1):\n    val.append(int(input(f'Digite o {c} valor: ')))\nprint('\\n')\nfor pos, v in enumerate(val):\n    print(f'O valor {v} ficou na {pos}a posição')\n\na = [1, 2, 4.4]\nb = a\nprint(f'Lista A: {a}')\nprint(f'Lista B: {b}')\na[2] = 4\n# b[3] = 5\nprint(f'Nova lista de A: {a}')\nprint(f'Nova lista de B: {b}')\n\n# cópia\nb = a[:]\na[1] = 4\nprint(f'Novo B: {b}')\nprint(f'Novo A: {a}')\n\"\"\"\n\"\"\"\ndados = []\ndados.append('Maria')\ndados.append(4)\ndados.append('Gabriel')\ndados.append(14)\ndados.append('Júlia')\ndados.append(20)\n\npessoas = []\n# cópia completa de uma lista\n# pessoas.append(dados[:])\npessoas = [['Maria', 25], ['Gabriel', 14], ['Júlia', 20]]\n# print(dados)\n# print(pessoas)\n\nprint(f'A {pessoas[0][0]} tem {pessoas[0][1]} anos')\nprint(f'O {pessoas[1][0]} tem {pessoas[1][1]} anos')\nprint(f'A {pessoas[2][0]} tem {pessoas[2][1]} anos')\n\nprint('''[1] Ver Maria:\n[2] Ver gabriel:\n[3] Ver Júlia: ''')\nresposta = int(input('deseja ver qual pessoa: '))\n\nif resposta == 1:\n    if pessoas[0][1] > 20:\n        print(f'A {pessoas[0][0]} está adulta')\n    else:\n        print(f'A {pessoas[0][0]} não está adulta')\nelif resposta == 2:\n    if pessoas[1][1] > 20:\n        print(f'O {pessoas[1][0]} está adulto')\n    else:\n        print(f'O {pessoas[1][0]} não está adulto')\nelif resposta == 3:\n    if pessoas[2][1] > 20:\n        print(f'A {pessoas[2][0]} está adulta')\n    else:\n        print(f'A {pessoas[2][0]} não está adulta')\n\nteste = []\ngalera = []\nteste.append('Gabriel')\nteste.append(25)\nprint(teste)\ngalera.append(teste[:])\nprint(galera)\nprint('-='*30)\nteste[0] = 'Júlia'\nteste[1] = 20\nprint(teste)\nprint(galera)\nprint('-='*30)\n\nturma = []\nturma = [\n    [\n        ['Biscui', 9], ['Eurico', 30], ['Gustavo', 14], \n        ['Maria', 32], ['Fulano', 22], \n        ['Siclano', 43], ['Carlos', 20], ['Júlio', 33], \n        ['Alice', 8], ['Kari', 20], ['Cassandra', 27]\n        \n    ]\n    ]\nfor t in turma:\n    print(t[0][0])\n    print(t[1][0])\n    print(t[2][0])\n    print(t[3][0])\n    print(t[4][0])\n    print(t[5][0])\n    print(t[6][0])\n    print(t[7][0])\n    print(t[8][0])\n    print(t[9][0])\n    print(t[10][0])\n\nif 9 in turma[0][1]:\n    print('temos uma criança em nossa turma')\nelse:\n    print('Não há evidência de criança nesta turma')\n\ngalera = []\ndado = []\nbebe = crianca = adolecente = adulto = idoso = 0\nfor c in range(1, 5+1):\n    dado.append(str(input(f'Digite nome da {c}a pessoa: ')))\n    dado.append(int(input(f'Digite a idade da {c}a pessoa: ')))\n    galera.append(dado[:])\n    dado.clear()\nprint(dado)\nprint(galera)\n\nfor p in galera:\n    if p[1] >= 60:\n        idoso += 1\n    elif p[1] >= 21:\n        adulto += 1\n    elif p[1] >= 15:\n        adolecente += 1\n    elif p[1] >= 6:\n        crianca += 1\n    else:\n        bebe += 1\n\n\nprint(f'Foi {bebe} bebê')\nprint(f'Foi {crianca} criança')\nprint(f'Foi {adolecente} adolecente(a)')\nprint(f'Foi {adulto} adulto(a)')\nprint(f'Foi {idoso} idoso(a)')","sub_path":"Arquivos em PYTHON/Aulas/Youtube/Curso em Vídeo/Listas.py","file_name":"Listas.py","file_ext":"py","file_size_in_byte":4686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"627643769","text":"import numpy as np\nimport csv\n\ndef scale_3_by_4_file(filename, outfile_name=\"scaled.out\" ):\n\n    f = open(filename)\n    
\n    outfile = open(outfile_name,\"w\")\n    \n    for line in f:\n\n        fields = line.strip().split(',')\n\n        outfile.write(\"%s,%s,%f\\n\" % (fields[0], fields[1], float(fields[2]) / float(fields[3])))\n        \n    outfile.close()\n\n","sub_path":"data_manipulation/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"88193285","text":"import numpy as np\nimport pdb\n\ndef recoverh36m_3d(gt,pred):\n\tjoint_to_ignore = np.array([22, 23, 24, 25, 26, 27])\n\tjoint_equal = np.array([12, 7, 9, 12, 2, 0])\n\tunchange_joint=np.array([28,29,30,31]) # corresponding to original joints: 0,1,6,11\n\ttem=np.zeros([gt.shape[0],gt.shape[1],len(joint_to_ignore)+len(unchange_joint),gt.shape[-1]])\n\t#pdb.set_trace()\n\tpred_3d=np.concatenate((pred, tem), axis=2)\n\tpred_3d[:,:,joint_to_ignore]=pred_3d[:,:,joint_equal]\n\tpred_3d[:,:,unchange_joint]=gt[:,:,unchange_joint]\n\n\treturn pred_3d\n","sub_path":"utils/recoverh36m_3d.py","file_name":"recoverh36m_3d.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145999459","text":"'''\nexpEYES program\nAuthor : Ajith Kumar B.P, bpajith@gmail.com\nLicense : GNU GPL version 3\n'''\nimport gettext\ngettext.bindtextdomain(\"expeyes\")\ngettext.textdomain('expeyes')\n_ = gettext.gettext\n\nimport expeyes.eyes17 as eyes, expeyes.eyeplot17 as eyeplot, expeyes.eyemath17 as eyemath, time, sys, math\nVER = sys.version[0]\nif VER == '3':\n\tfrom tkinter import *\nelse:\n\tfrom Tkinter import *\n\nWIDTH = 600 # width of drawing canvas\nHEIGHT = 400 # height \n\nclass SR04:\n\ttv = [ [], [] ]\t\t\t# Lists for Readings\n\tTIMER = 5\t\t\t# Time interval between reads\n\tMINY = 0\t\t\t# Voltage range\n\tMAXY = 150\n\trunning = False\n\tMAXTIME = 10\n\n\tdef fit_curve(self):\n\t\tfa = eyemath.fit_dsine(self.tv[0], self.tv[1])\n\t\tif fa != None:\n\t\t\tpa = fa[1]\n\t\t\tg.line(self.tv[0], fa[0],1)\n\t\t\tself.msg(_('Angular velocity = %5.2f rad/sec. 
Damping Factor = %5.3f')%(pa[1], pa[4]))\n\t\telse:\n\t\t\tself.msg(_('Failed to fit data'))\n\n\tdef xmgrace(self):\n\t\tif self.running == True:\n\t\t\treturn\n\t\teyeplot.grace([self.tv])\n\n\tdef start(self):\n\t\tself.running = True\n\t\tself.index = 0\n\t\tself.tv = [ [], [] ]\n\t\ttry:\n\t\t\tself.MAXTIME = int(DURATION.get())\n\t\t\tg.setWorld(0, self.MINY, self.MAXTIME, self.MAXY,_('Time'),_('Distance'))\n\t\t\tDur.config(state=DISABLED)\n\t\t\tself.msg(_('Starting the Measurements'))\n\t\t\troot.after(self.TIMER, self.update)\n\t\texcept:\n\t\t\tself.msg(_('Failed to Start'))\n\n\tdef stop(self):\n\t\tself.running = False\n\t\tDur.config(state=NORMAL)\n\t\tself.msg(_('User Stopped the measurements'))\n\n\tdef update(self):\n\t\tif self.running == False:\n\t\t\treturn\n\t\tt,v = p.sr04_distance_time() # SR04 measurement\n\t\tif len(self.tv[0]) == 0:\n\t\t\tself.start_time = t\n\t\t\telapsed = 0\n\t\telse:\n\t\t\telapsed = t - self.start_time\n\t\tself.tv[0].append(elapsed)\n\t\tself.tv[1].append(v)\n\t\tif len(self.tv[0]) >= 2:\n\t\t\tg.delete_lines()\n\t\t\tg.line(self.tv[0], self.tv[1])\n\t\tif elapsed > self.MAXTIME:\n\t\t\tself.running = False\n\t\t\tDur.config(state=NORMAL)\n\t\t\tself.msg(_('Completed the Measurements'))\n\t\t\treturn \n\t\troot.after(self.TIMER, self.update)\n\n\tdef save(self):\n\t\ttry:\n\t\t\tfn = filename.get()\n\t\texcept:\n\t\t\tfn = 'sr04-dist.dat'\n\t\tp.save([self.tv],fn)\n\t\tself.msg(_('Data saved to %s')%fn)\n\n\tdef clear(self):\n\t\tif self.running == True:\n\t\t\treturn\n\t\tself.tv = [ [], [] ]\n\t\tg.delete_lines()\n\t\tself.msg(_('Cleared Data and Trace'))\n\n\tdef msg(self,s, col = 'blue'):\n\t\tmsgwin.config(text=s, fg=col)\n\np = eyes.open()\nroot = Tk()\nCanvas(root, width = WIDTH, height = 5).pack(side=TOP) # Some space at the top\ng = eyeplot.graph(root, width=WIDTH, height=HEIGHT, bip=False)\t# make plot objects using draw.disp\nsr = SR04()\n\ncf = Frame(root, width = WIDTH, height = 10)\ncf.pack(side=TOP, fill = BOTH, expand = 1)\n\n\nb3 = Label(cf, text = _('Digitize for'))\nb3.pack(side = LEFT, anchor = SW)\nDURATION = StringVar()\nDur =Entry(cf, width=5, bg = 'white', textvariable = DURATION)\nDURATION.set('10')\nDur.pack(side = LEFT, anchor = SW)\nb3 = Label(cf, text = _('Seconds.'))\nb3.pack(side = LEFT, anchor = SW)\n\ncf = Frame(root, width = WIDTH, height = 10)\ncf.pack(side=TOP, fill = BOTH, expand = 1)\nb1 = Button(cf, text = _('START'), command = sr.start)\nb1.pack(side = LEFT, anchor = N)\nb1 = Button(cf, text = _('STOP'), command = sr.stop)\nb1.pack(side = LEFT, anchor = N)\nb1 = Button(cf, text = _('FIT'), command = sr.fit_curve)\nb1.pack(side = LEFT, anchor = N)\nb4 = Button(cf, text = _('CLEAR'), command = sr.clear)\nb4.pack(side = LEFT, anchor = N)\nb1 = Button(cf, text = _('Xmgrace'), command = sr.xmgrace)\nb1.pack(side = LEFT, anchor = N)\nb3 = Button(cf, text = _('SAVE to'), command = sr.save)\nb3.pack(side = LEFT, anchor = N)\nfilename = StringVar()\ne1 =Entry(cf, width=15, bg = 'white', textvariable = filename)\nfilename.set('sr04-dist.dat')\ne1.pack(side = LEFT)\nb5 = Button(cf, text = _('QUIT'), command = sys.exit)\nb5.pack(side = RIGHT, anchor = N)\n\nmf = Frame(root, width = WIDTH, height = 10)\nmf.pack(side=TOP)\nmsgwin = Label(mf,text=_('Message'), fg = 'blue')\nmsgwin.pack(side=LEFT, anchor = S, fill=BOTH, expand=1)\n\n\neyeplot.pop_image('pics/sr04-dist.png', _('Distance by SR04 Echo module'))\nroot.title(_('Distance by Ultrasound Echo, 
SR04'))\nroot.mainloop()\n\n","sub_path":"old_expeyes_code/sr04-dist.py","file_name":"sr04-dist.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"565784176","text":"import numpy as np\n\n\ndef __find_sub_words_seq(words, sub_words):\n i, li, lj = 0, len(words), len(sub_words)\n while i + lj <= li:\n j = 0\n while j < lj:\n if words[i + j] != sub_words[j]:\n break\n j += 1\n if j == lj:\n return i\n i += 1\n return -1\n\n\ndef __label_words_with_terms(words, terms, label_val_beg, label_val_in, x):\n for term in terms:\n # term_words = term.lower().split(' ')\n term_words = term.split(' ')\n pbeg = __find_sub_words_seq(words, term_words)\n if pbeg == -1:\n print(words)\n print(terms)\n print()\n continue\n x[pbeg] = label_val_beg\n for p in range(pbeg + 1, pbeg + len(term_words)):\n x[p] = label_val_in\n\n\ndef label_sentence(words, aspect_terms):\n label_val_beg, label_val_in = 1, 2\n\n x = np.zeros(len(words), np.int32)\n __label_words_with_terms(words, aspect_terms, label_val_beg, label_val_in, x)\n return x\n\n\ndef get_machine_name():\n import socket\n hostname = socket.gethostname()\n dot_pos = hostname.find('.')\n return hostname[:dot_pos] if dot_pos > -1 else hostname[:]\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"61495825","text":"import flask\n\nfrom canonicalwebteam.blog import wordpress_api as api\nfrom canonicalwebteam.blog import logic\nfrom canonicalwebteam.blog.common_view_logic import (\n get_index_context,\n get_article_context,\n)\n\n\ndef build_blueprint(blog_title, tag_ids, tag_name, excluded_tags=[]):\n blog = flask.Blueprint(\n \"blog\", __name__, template_folder=\"/templates\", static_folder=\"/static\"\n )\n\n @blog.route(\"/\")\n def homepage():\n page_param = flask.request.args.get(\"page\", default=1, type=int)\n\n try:\n if page_param == \"1\":\n featured_articles, total_pages = api.get_articles(\n tags=tag_ids,\n tags_exclude=excluded_tags,\n page=page_param,\n sticky=\"true\",\n per_page=3,\n )\n featured_article_ids = [\n article[\"id\"] for article in featured_articles\n ]\n articles, total_pages = api.get_articles(\n tags=tag_ids,\n tags_exclude=excluded_tags,\n exclude=featured_article_ids,\n page=page_param,\n )\n else:\n articles, total_pages = api.get_articles(\n tags=tag_ids, page=page_param\n )\n featured_articles = []\n except Exception:\n return flask.abort(502)\n\n context = get_index_context(page_param, articles, total_pages)\n\n return flask.render_template(\"blog/index.html\", **context)\n\n @blog.route(\"/feed\")\n def feed():\n try:\n feed = api.get_feed(tag_name)\n except Exception as e:\n print(e)\n return flask.abort(502)\n\n right_urls = logic.change_url(\n feed, flask.request.base_url.replace(\"/feed\", \"\")\n )\n\n right_title = right_urls.replace(\"Ubuntu Blog\", blog_title)\n\n return flask.Response(right_title, mimetype=\"text/xml\")\n\n @blog.route(\n '///'\n '/'\n )\n @blog.route('///')\n @blog.route('//')\n def article_redirect(slug, year, month=None, day=None):\n return flask.redirect(flask.url_for(\".article\", slug=slug))\n\n @blog.route(\"/\")\n def article(slug):\n try:\n articles = api.get_article(slug, tag_ids)\n except Exception:\n return flask.abort(502)\n\n if not articles:\n flask.abort(404, \"Article not found\")\n\n context = get_article_context(articles, tag_ids)\n\n 
return flask.render_template(\"blog/article.html\", **context)\n\n @blog.route(\"/latest-news\")\n def latest_news():\n try:\n latest_pinned_articles = api.get_articles(\n tags=tag_ids,\n exclude=excluded_tags,\n page=1,\n per_page=1,\n sticky=True,\n )\n # check if the number of returned articles is 0\n if len(latest_pinned_articles[0]) == 0:\n latest_articles = api.get_articles(\n tags=tag_ids,\n exclude=excluded_tags,\n page=1,\n per_page=4,\n sticky=False,\n )\n else:\n latest_articles = api.get_articles(\n tags=tag_ids,\n exclude=excluded_tags,\n page=1,\n per_page=3,\n sticky=False,\n )\n except Exception:\n return flask.jsonify({\"Error\": \"An error ocurred\"}), 502\n\n return flask.jsonify(\n {\n \"latest_articles\": latest_articles,\n \"latest_pinned_articles\": latest_pinned_articles,\n }\n )\n\n return blog\n","sub_path":"canonicalwebteam/blog/flask/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181558586","text":"import numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, LSTM, Input\n\n#1. 데이터\na = np.array(range(1,11))\nsize = 5 #time_steps=4\n\ndef split_x(seq, size):\n bbb = []\n for i in range(len(seq)-size+1):\n subset = seq[i : (i+size)]\n \n bbb.append([item for item in subset])\n print(type(bbb))\n return np.array(bbb)\n\ndataset = split_x(a,size)\n\nprint(dataset)\nprint(dataset.shape)\nprint(type(dataset))\n\nx=dataset[:,0:4]\ny=dataset[:,4]\n\nx= np.reshape(x,(6,4,1))\n#x=x.reshape(6,4,1)\n\n\n\nfrom keras.models import load_model\nmodel = load_model('./model/save_keras44.h5')\n\nfrom keras.layers import Dense\nmodel.add(Dense(1,name='dense_x'))\nmodel.summary()\n\n'''\n\nmodel.compile(loss='mse',optimizer='adam',metrics=['mse'])\nmodel.fit(x,y,epochs=4000)\n\nloss,mse= model.evaluate(x,y)\ny_predict = model.predict(x)\nprint('loss : ', loss)\nprint('mse : ', mse)\nprint('y_predict : ', y_predict)\n'''","sub_path":"keras45_.py","file_name":"keras45_.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"380589741","text":"from Myro import *\nfrom Graphics import *\nimport colorsys\n\n\n\nblack = makeColor(0,0,0)\nwhite = makeColor(255,255,255)\n\ndef learningToPhoto():\n blackPixels = []\n whitePixels = []\n xBlack = 0\n yBlack = 0\n #manualCamera(1024,1000,100)\n I = takePicture()\n pic = Picture(I)\n for x in range(pic.width):\n for y in range(pic.height):\n orderedPair = {}\n \n coloredPixel = pic.getRGBA(x,y)\n #orderedPair.append([x,y])\n \n if coloredPixel[0] >= 110 and coloredPixel[0] <= 255 and coloredPixel[1] <=120 and coloredPixel[2] <=80:\n #setPixel(orderedPair,black)\n pic.setRGBA(x,y,0,0,0,255)\n blackPixels.append([x,y])\n xBlack += x\n yBlack += y\n #orderedPair[str (x) + \" \" + str (y)]\n else:\n pic.setRGBA(x,y,255,255,255,255)\n whitePixels.append([x,y])\n #setPixel(orderedPair[0],orderedPair[1],white)\n show(pic,\"blob_image\")\n show(I, \"original_image\")\n xCentroid = xBlack/len(blackPixels)\n yCentroid = yBlack/len(blackPixels)\n\n #learningToPhoto()\n #print(blackPixels)\n print(\"mass of centroid\", len(blackPixels))\n print(\"AVERAGE X VALUE\", xBlack/len(blackPixels) )\n print(\"AVERAGE Y VALUE\", yBlack/len(blackPixels) )\n win = getWindow(\"blob_image\")\n win.draw(Circle((xCentroid,yCentroid),5))\n \n \n\n\n\ndef colorTest():\n origPic = takePicture()\n #yuvValues = []\n 
yuvPic = Picture(origPic)\n for x in range(origPic.width):\n for y in range(origPic.height):\n rgbStuffs = origPic.getRGB(x,y)\n #print(rgbStuffs)\n yuvValues=colorsys.rgb_to_yiq(rgbStuffs[0],rgbStuffs[1],rgbStuffs[2])\n print(yuvValues)\n yuvPic.setRGB(x,y,yuvValues[0],yuvValues[1],yuvValues[2])\n show(yuvPic,\"blob_image\")\n show(origPic, \"original_image\")\n #xCentroid = xBlack/len(blackPixels)\n #yCentroid = yBlack/len(blackPixels)","sub_path":"All New and Cleaned Code/cameraBlob-master/OLD/JEFFFFFFF.py","file_name":"JEFFFFFFF.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"271207542","text":"from functions import *\nimport sys\n\nhmm_file = sys.argv[1]\nsequence_file = sys.argv[2]\n\n# getting hmm values from file\nq, obs_symb, a, b, Pi, n, m, T = create_hmm(hmm_file)\n\n# mapping sequence symbols to numbers.\nobs_symb = enum_symbols(obs_symb)\n\n# getting sequence sets from file\nobs_seq_set = get_obs_seq_sets(sequence_file)\n\n# P(O|Lambda) list for all observation sequences\np_fwd = []\n\nfor seq in obs_seq_set:\n p_fwd.append(forward(a, b, Pi, obs_symb, seq, n))\n\n\n#print viterbi(a,b,Pi,obs_symb,seq,n)\n\n#print(b)\n\nfor i in p_fwd:\n print(i)\n\n","sub_path":"recognize.py","file_name":"recognize.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"368111236","text":"\"\"\" This file contains the necessary functionality to return a likelihood p(D|θ) \"\"\"\nimport numpy as np\nfrom pf_likelihood import pf_likelihood\nfrom functools import reduce\nfrom operator import mul\nfrom progressbar import ProgressBar\nfrom itertools import chain\n\ndef likelihood_experiment(location_sigma: float, location_d: float, mixture_d: float,\n flip_p: float, num_particles: int = 1000, print_progress=False):\n dots = np.loadtxt('../data/dots.txt', delimiter=',', usecols=(2,3,1))\n\n # change how many sessions are in each task\n taskLength = [18, 16, 17, 16, 17, 16]\n correctTaskLength = [23, 27, 28]\n\n within_nums = [1, 5, 10, 20, 50, 100]\n\n likelihoods = [[1 for j in range(sessLength)] for sessLength in correctTaskLength]\n\n for i, tasknum in enumerate(correctTaskLength):\n if print_progress:\n print(f'Running task {i+1} / {len(correctTaskLength)}')\n total_len = 0\n withins = np.zeros(len(within_nums))\n\n all_preds = []\n\n if print_progress:\n bar = ProgressBar(max_value=correctTaskLength[i]-1)\n\n # if you only want sessions with correct answer, use correctTaskLength, otherwise, use taskLength\n for j in range(correctTaskLength[i]):\n likelihoods[i][j] = pf_likelihood(\n dots, location_sigma, location_d, mixture_d, flip_p, int(num_particles),\n f'../data/sessionsCorrect/task{i+1}/{j+1}.txt'\n )\n if print_progress:\n bar.update(j)\n\n overall_likelihood = reduce(mul, chain.from_iterable(likelihoods), 1)\n if print_progress:\n print(overall_likelihood)\n return overall_likelihood\n\nif __name__ == '__main__':\n # parameters (these should be tuned!)\n location_sigma = 0.1 # p(click | particle) = N(click; particle, σ²I) (how widely should we search around a particle for the next click)\n location_d = 0.02 # size of diffusion step for location\n mixture_d = 0.05 # size of diffusion step for mixture weights\n flip_p = 0.45 # probability of type flipping in diffusion step\n num_particles = 1000 # number of particles\n\n likelihood_experiment(location_sigma, location_d, mixture_d, 
flip_p)","sub_path":"hmm/tuning_experiment.py","file_name":"tuning_experiment.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"71576005","text":"#!/usr/bin/env python3\n\"\"\"\nScript for managing OIDC.\n\"\"\"\nimport os\nimport boto3\nimport json\nimport sys\n\n\nHERE = os.path.abspath(os.path.dirname(__file__))\nsys.path.insert(0, HERE)\nfrom utils import get_secrets, MOCK_ENDPOINT, DEFAULT_CLIENT\n\n\ndef main():\n print(\"Bootstrapping OIDC config\")\n\n # Get the secrets.\n secrets = get_secrets()\n\n # Write the oidc orchestration file.\n provider1_info = {\n \"authNamePrefix\": \"test1\",\n \"issuer\": secrets['oidc_issuer_1_uri'],\n \"clientId\": DEFAULT_CLIENT,\n \"audience\": DEFAULT_CLIENT,\n \"authorizationClaim\": \"foo\",\n\n }\n providers = json.dumps([provider1_info], separators=(',',':'))\n\n data = {\n \"id\": \"oidc-repl0\",\n \"auth_key\": \"secret\",\n \"login\": \"bob\",\n \"name\": \"mongod\",\n \"password\": \"pwd123\",\n \"members\": [{\n \"procParams\": {\n \"ipv6\": \"NO_IPV6\" not in os.environ,\n \"bind_ip\": \"0.0.0.0,::1\",\n \"logappend\": True,\n \"port\": 27017,\n \"setParameter\": {\n \"enableTestCommands\": 1,\n \"authenticationMechanisms\": \"SCRAM-SHA-256,MONGODB-OIDC\",\n \"oidcIdentityProviders\": providers\n }\n }\n }]\n }\n\n provider1_info['matchPattern'] = \"test_user1\"\n provider2_info = {\n \"authNamePrefix\": \"test2\",\n \"issuer\": secrets['oidc_issuer_2_uri'],\n \"clientId\": DEFAULT_CLIENT,\n \"audience\": DEFAULT_CLIENT,\n \"authorizationClaim\": \"bar\",\n \"matchPattern\": \"test_user2\",\n }\n providers = [provider1_info, provider2_info]\n providers = json.dumps(providers, separators=(',',':'))\n data['members'].append({\n \"procParams\": {\n \"ipv6\": \"NO_IPV6\" not in os.environ,\n \"bind_ip\": \"0.0.0.0,::1\",\n \"logappend\": True,\n \"port\": 27018,\n \"setParameter\": {\n \"enableTestCommands\": 1,\n \"authenticationMechanisms\": \"SCRAM-SHA-256,MONGODB-OIDC\",\n \"oidcIdentityProviders\": providers\n }\n },\n \"rsParams\": {\n \"priority\": 0\n }\n })\n\n orch_file = os.path.abspath(os.path.join(HERE, '..', 'orchestration', 'configs', 'replica_sets', 'auth-oidc.json'))\n with open(orch_file, 'w') as fid:\n json.dump(data, fid, indent=4)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":".evergreen/auth_oidc/oidc_write_orchestration.py","file_name":"oidc_write_orchestration.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"630593183","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 19:46, 09/03/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n# -------------------------------------------------------------------------------------------------------%\n\nfrom numpy import reshape, array, log, exp, sign, abs, power, floor\nfrom numpy.random import seed, permutation\nfrom scipy.stats import boxcox\nfrom scipy.special import inv_boxcox\nfrom pandas import Series\nfrom matplotlib import pyplot as plt\n\n\nclass CheckDataset:\n \"\"\"\n Checking whether data is stationary or non-stationary (trend, seasonality, ...)\n https://machinelearningmastery.com/time-series-data-stationary-python/\n 
https://machinelearningmastery.com/difference-time-series-dataset-python/\n \"\"\"\n\n def check_by_plot_raw_data(self, pathfile=None):\n self.series = Series.from_csv(pathfile, header=0)\n self.series.plot()\n plt.show()\n\n def check_by_summary_statistic(self, pathfile=None, draw_history=True):\n \"\"\"\n You can split your time series into two (or more) partitions and compare the mean and variance of each group.\n If they differ and the difference is statistically significant, the time series is likely non-stationary.\n\n Because we are looking at the mean and variance, we are assuming that the data conforms to a Gaussian\n (also called the bell curve or normal) distribution. ==> Stationary\n \"\"\"\n self.series = Series.from_csv(pathfile, header=0)\n X = self.series.values\n split = int(len(X) / 2)\n X1, X2 = X[0:split], X[split:]\n mean1, mean2 = X1.mean(), X2.mean()\n var1, var2 = X1.var(), X2.var()\n\n self.series.hist()\n print('mean1=%f, mean2=%f' % (mean1, mean2))\n print('variance1=%f, variance2=%f' % (var1, var2))\n\n if draw_history:\n self.series.hist()\n plt.show()\n\n def _checking_consecutive__(self, df, time_name=\"timestamp\", time_different=300):\n \"\"\"\n :param df: Type of this must be dataframe\n :param time_name: the column name of date time\n :param time_different by seconds: 300 = 5 minutes\n https://pandas.pydata.org/pandas-docs/version/0.23.4/generated/pandas.Timedelta.html\n :return:\n \"\"\"\n consecutive = True\n for i in range(df.shape[0] - 1):\n diff = (df[time_name].iloc[i + 1] - df[time_name].iloc[i]).seconds\n if time_different != diff:\n print(\"===========Not consecutive at: {}, different: {} ====================\".format(i + 3, diff))\n consecutive = False\n return consecutive\n\n\nclass MiniBatch(object):\n def __init__(self, X_train, y_train, batch_size):\n self.X_train = X_train\n self.y_train = y_train\n self.batch_size = batch_size\n\n def random_mini_batches(self, seed_number=None):\n X, Y = self.X_train.T, self.y_train.T\n mini_batch_size = self.batch_size\n\n m = X.shape[1] # number of training examples\n mini_batches = []\n seed(seed_number)\n\n # Step 1: Shuffle (X, Y)\n perm = list(permutation(m))\n shuffled_X = X[:, perm]\n shuffled_Y = Y[:, perm].reshape((Y.shape[0], m))\n\n # Step 2: Partition (shuffled_X, shuffled_Y). 
Minus the end case.\n num_complete_minibatches = int(floor(m / mini_batch_size)) # number of mini batches of size mini_batch_size in your partitionning\n for k in range(0, num_complete_minibatches):\n mini_batch_X = shuffled_X[:, k * mini_batch_size: (k + 1) * mini_batch_size]\n mini_batch_Y = shuffled_Y[:, k * mini_batch_size: (k + 1) * mini_batch_size]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n # Handling the end case (last mini-batch < mini_batch_size)\n if m % mini_batch_size != 0:\n mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: m]\n mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: m]\n mini_batch = (mini_batch_X, mini_batch_Y)\n mini_batches.append(mini_batch)\n\n return mini_batches\n\n\nclass TimeSeries:\n def __init__(self, data=None, train_split=0.8):\n self.data_original = data\n if train_split < 1.0:\n self.train_split = int(train_split * self.data_original.shape[0])\n else:\n self.train_split = train_split\n self.multi_size = len(self.data_original[0]) # Remember if using multi-variate --> The last column is the predicted output.\n\n def _scaling__(self, scale_type=\"std\", separate=True):\n \"\"\"\n :param dataset: 2D numpy array\n :param scale_type: std / minmax\n :return:\n \"\"\"\n self.data_new =[]\n self.data_mean=[]\n self.data_std = []\n self.data_min = []\n self.data_max = []\n\n self.data_kurtosis = []\n self.data_mean_kur = []\n self.data_std_kur = []\n self.lamda_boxcox = []\n self.data_boxcox = []\n\n for i in range(self.multi_size):\n col_data_original = self.data_original[:,i]\n\n if separate:\n col_data_mean, col_data_std = col_data_original[:self.train_split].mean(axis=0), col_data_original[:self.train_split].std(axis=0)\n col_data_min, col_data_max = col_data_original[:self.train_split].min(axis=0), col_data_original[:self.train_split].max(axis=0)\n else:\n col_data_mean, col_data_std = col_data_original.mean(axis=0), col_data_original.std(axis=0)\n col_data_min, col_data_max = col_data_original.min(axis=0), col_data_original.max(axis=0)\n\n if scale_type == \"std\":\n col_data_new = (col_data_original - col_data_mean) / col_data_std\n elif scale_type == \"minmax\":\n col_data_new = (col_data_original - col_data_min) / (col_data_max - col_data_min)\n elif scale_type == \"loge\":\n col_data_new = log(col_data_original)\n\n elif scale_type == \"kurtosis\":\n col_data_new = sign(col_data_original - col_data_mean) * power(abs(col_data_original - col_data_mean), 1.0 / 3)\n elif scale_type == \"kurtosis_std\":\n col_data_kurtosis = sign(col_data_original - col_data_mean) * power(abs(col_data_original - col_data_mean), 1.0 / 3)\n col_data_mean_kur, col_data_std_kur = col_data_original[:self.train_split].mean(axis=0), col_data_original[:self.train_split].std(axis=0)\n col_data_new = (col_data_kurtosis - col_data_mean_kur) / col_data_std_kur\n self.data_kurtosis.append(col_data_kurtosis)\n self.data_mean_kur.append(col_data_mean_kur)\n self.data_std_kur.append(col_data_std_kur)\n elif scale_type == \"boxcox\":\n col_data_new, col_lamda_boxcox = boxcox(col_data_original.flatten())\n elif scale_type == \"boxcox_std\":\n col_data_boxcox, col_lamda_boxcox = boxcox(col_data_original.flatten())\n col_data_boxcox = col_data_boxcox.reshape(-1, 1)\n col_data_mean,col_data_std = col_data_boxcox[:self.train_split].mean(axis=0), col_data_boxcox[:self.train_split].std(axis=0)\n col_data_new = (col_data_boxcox - col_data_mean) / col_data_std\n self.lamda_boxcox.append(col_lamda_boxcox)\n 
self.data_boxcox.append(col_data_boxcox)\n\n self.data_mean.append(col_data_mean)\n self.data_std.append(col_data_std)\n self.data_min.append(col_data_min)\n self.data_max.append(col_data_max)\n self.data_new.append(col_data_new)\n return array(self.data_new)\n\n def _inverse_scaling__(self, data=None, scale_type=\"std\"):\n if scale_type == \"std\":\n return self.data_std[self.multi_size-1] * data + self.data_mean[self.multi_size-1]\n elif scale_type == \"minmax\":\n return data * (self.data_max[self.multi_size-1] - self.data_min[self.multi_size-1]) + self.data_min[self.multi_size-1]\n elif scale_type == \"loge\":\n return exp(data)\n\n elif scale_type == \"kurtosis\":\n return power(data, 3) + self.data_mean[self.multi_size-1]\n elif scale_type == \"kurtosis_std\":\n temp = self.data_std_kur[self.multi_size-1] * data + self.data_mean_kur[self.multi_size-1]\n return power(temp, 3) + self.data_mean[self.multi_size-1]\n\n elif scale_type == \"boxcox\":\n return inv_boxcox(data, self.lamda_boxcox[self.multi_size-1])\n elif scale_type == \"boxcox_std\":\n boxcox_invert = self.data_std[self.multi_size-1] * data + self.data_mean[self.multi_size-1]\n return inv_boxcox(boxcox_invert, self.lamda_boxcox[self.multi_size-1])\n\n def _make_train_test_data__(self, dataset, history_column=None, start_index=0, end_index=None, pre_type=\"2D\"):\n \"\"\"\n :param dataset: 2-D numpy array\n :param history_column: python list time in the past you want to use. (1, 2, 5) means (t-1, t-2, t-5) predict time t\n :param start_index: 0- training set, N- valid or testing set\n :param end_index: N-training or valid set, None-testing set\n :param pre_type: 3D for RNN-based, 2D for normal neural network like MLP, FFLN,..\n :return:\n \"\"\"\n data = []\n labels = []\n\n history_size = len(history_column)\n if end_index is None:\n end_index = len(dataset[self.multi_size-1]) - history_column[-1] - 1 # for time t, such as: t-1, t-4, t-7 and finally t\n else:\n end_index = end_index - history_column[-1] - 1\n\n for i in range(start_index, end_index):\n indices = i - 1 + array(history_column)\n # Reshape data from (history_size,) to (history_size, 1)\n data.append([])\n for j in range(self.multi_size):\n for vl in reshape(dataset[j][indices], (history_size, 1)):\n data[i-start_index].append(vl)\n labels.append(dataset[self.multi_size-1][i + history_column[-1]])\n if pre_type == \"3D\":\n return array(data), array(labels)\n return reshape(array(data), (-1, history_size*self.multi_size)), reshape(array(labels), (-1, 1))\n\n\n def _make_train_test_with_expanded__(self, dataset, history_column=None, start_index=0, end_index=None, pre_type=\"2D\", expand_func=None):\n data = []\n labels = []\n\n history_size = len(history_column)\n if end_index is None:\n end_index = len(dataset[self.multi_size - 1]) - history_column[-1] - 1 # for time t, such as: t-1, t-4, t-7 and finally t\n else:\n end_index = end_index - history_column[-1] - 1\n\n for i in range(start_index, end_index):\n indices = i - 1 + array(history_column)\n # Reshape data from (history_size,) to (history_size, 1)\n data.append([])\n for j in range(self.multi_size):\n for vl in reshape(dataset[j][indices], (history_size, 1)):\n data[i - start_index].append(vl)\n labels.append(dataset[self.multi_size - 1][i + history_column[-1]])\n\n # Make the matrix X and Column Matrix y\n data = reshape(array(data), (-1, history_size * self.multi_size))\n labels = reshape(array(labels), (-1, 1))\n\n ## Expanded function applied here\n data = expand_func(data)\n\n if pre_type == 
\"3D\":\n return array(data), labels\n return data, labels\n","sub_path":"utils/PreprocessingUtil.py","file_name":"PreprocessingUtil.py","file_ext":"py","file_size_in_byte":12160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"492048635","text":"import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pprint import pprint\niris = pd.read_csv(\"5.csv\") #load the dataset\n# print(iris[0:2])\n\nX = iris.iloc[:,1:5]\nprint(X)\n# print(type(X))\n\n\n# import sys \n# sys.exit()\ndef plot_model(lables, alg_name, plot_index):\n fig = plt.figure(figsize=(15,15))\n # plt.figure(plot_index)\n ax = fig.add_subplot(1, 1, plot_index) \n # ax = fig.add_subplot(3, 2, plot_index) \n color_code = {'anomaly': 'red', 'normal': 'green'}\n colors = [color_code[x] for x in labels]\n\n ax.scatter(X.iloc[:,0], X.iloc[:,1], color=colors, marker='.', label='red = anomaly')\n ax.legend(loc=\"lower right\")\n\n leg = plt.gca().get_legend()\n leg.legendHandles[0].set_color('red')\n\n ax.set_title(alg_name)\n plt.show()\n\n\n# nessacry configuration for some algorithms\noutliers_fraction = 0.05\n\n# ---------------- DBSCAN -----------\n# from sklearn.cluster import DBSCAN\n\n# # eps: maximum distance between two samples\n# model = DBSCAN(eps=0.63).fit(X)\n# labels = model.labels_\n# # print(labels)\n# # label == -1 then it is anomaly\n# labels = [('anomaly' if x==-1 else 'normal') for x in labels]\n\n# print(labels)\n# plot_model(labels, 'DBSCAN', 1)\n\n\n#----------- Isolation Forest --------------------------\nfrom sklearn.ensemble import IsolationForest\nfrom scipy import stats\n\nmodel = IsolationForest().fit(X)\n# print(model)\nscores_pred = model.decision_function(X)\n# print(scores_pred)\n\nthreshold = stats.scoreatpercentile(scores_pred, 100 * outliers_fraction)\n# print(threshold)\n\nlabels = [('anomaly' if x < threshold else 'normal') for x in scores_pred]\n# print(labels)\nplot_model(labels, 'Isolation Forest', 1)\n# print(model)\ndata = []\nfor idx, val in enumerate(scores_pred):\n # print('{}: {}: {}'.format(idx, val, labels[idx]))\n data.append((idx, val, labels[idx]))\n\n# print(len(scores_pred))\npprint(data)\n# print(dict(X))\nfor idx, item in enumerate(data):\n if item[2] == 'anomaly':\n print(X.iloc[[idx]])\n# ---------- LocalOutlierFactor -----------\n# from sklearn.neighbors import LocalOutlierFactor \n\n# model = LocalOutlierFactor()\n# model.fit_predict(X)\n# scores_pred = model.negative_outlier_factor_\n# threshold = stats.scoreatpercentile(scores_pred, 100 * outliers_fraction)\n\n# labels = [('anomaly' if x\n if data[i][j] ==\"<\": #attention not begin=i, begin=j\n begin=j\n if data[i][j] ==\">\":\n end=j\n tags.append(data[i][begin:end+1])\n #remove tag\n for tag in tags:\n data[i] = data[i].replace(tag,\"\")\n #remove whitespace from list\n data[i] = data[i].strip()\n\n\n#remove empty line\nunempty_lines=[]\nfor i in range(len(data)):\n if data[i]!=\"\":\n unempty_lines.append(data[i])\ndata = unempty_lines\n\nfile = open(\"test.txt\",encoding=\"utf8\",mode=\"w\")\nfor i in range(len(data)):\n file.write(data[i]+\"\\n\")\n\n# #name, dob, score\n# name = data[7]\n# dob = data[8]\n# score = data[9]\n\n# #load unicode table\n# chars = []\n# codes = []\n\n# file = open(\"unicode.txt\",encoding=\"utf8\")\n# unicode_table = file.read().split(\"\\n\")\n# for code in unicode_table:\n# x = code.split(\" \")\n# chars.append(x[0])\n# codes.append(x[1])\n\n# #replace code by char\n# for i in range(len(codes)):\n# name = 
name.replace(codes[i],chars[i])\n# score = score.replace(codes[i],chars[i])\n\n# #replace &# in name and score\n# for i in range(len(name)):\n# if name[i:i+2] == \"&#\":\n# name = name[:i]+ chr(int(name[i+2:i+5]))+name[i+6:]\n# for i in range(len(score)):\n# if score[i:i+2] == \"&#\":\n# score = score[:i]+ chr(int(score[i+2:i+5]))+score[i+6:]\n\n# #change to lower case\n# name = name.lower()\n# score = score.lower()\n# #split dob\n# dob_list=dob.split(\"/\")\n# dd = int(dob_list[0])\n# mm = int(dob_list[1])\n# yy = int(dob_list[2])\n\n# #process score\n# #remove :\n# score = score.replace(\":\",\"\")\n# score = score.replace(\"khtn \",\"khtn \")\n# score = score.replace(\"khxh \",\"khxh \")\n# score = score.split(\" \")\n# print(score)\n# data = [name, str(dd), str(mm), str(yy)]\n\n# for subject in [\"toán\",\"ngữ văn\",\"khxh\",\"khtn\",\"lịch sử\",\"địa lí\",\"gdcd\",\"sinh học\",\"vật lí\",\"hóa học\",\"tiếng anh\"]:\n# if subject in score:\n# data.append(str(float(score[score.index(subject)+1])))\n# else:\n# data.append(\"-1\")\n\n# #write to the new file\n# file = open(\"test.txt\",encoding=\"utf8\",mode=\"w\")\n# for i in range(len(data)):\n# file.write(data[i]+\",\")\n","sub_path":"Unit2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"389287793","text":"from shadowlands.sl_dapp import SLFrame\nfrom datetime import datetime, timedelta\n\nclass ENSAuctionFrame(SLFrame):\n def initialize(self):\n auction_status = self._dapp.ens_auction_contract.auction_status(self.dapp.name)\n\n if auction_status == 0:\n self.add_label(\"You can start an auction for this name.\")\n self.add_ok_cancel_buttons(self._start_auction_choice, self._cancel)\n elif auction_status == 1:\n self.add_label(\"The auction for '{}' has begun.\".format(self.dapp.name))\n reveal_date = self.dapp.ens_auction_contract.reveal_date(self.dapp.name)\n time_left = reveal_date - datetime.now() - timedelta(days=2)\n\n if time_left.total_seconds() > 0:\n minutes_left = time_left.total_seconds() // 60\n hours_left = minutes_left // 60\n days_left = round(hours_left / 24, 1)\n if minutes_left < 60:\n self.add_label(\"{} minutes left to bid.\".format(minutes_left))\n elif hours_left < 24:\n self.add_label(\"{} hours left to bid.\".format(hours_left))\n else:\n self.add_label(\"{} days left to bid.\".format(days_left))\n \n self.bidvalue = self.add_textbox(\"Bid Amount {}\".format(self.dapp.config.CURR_SYMBOLS['ETH']))\n self.bid_secret_value = self.add_textbox(\"Bid Secret Phrase:\")\n self.add_ok_cancel_buttons(self._make_bid_choice, self._cancel, ok_text=\"Bid\")\n \n #salt = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)])\n elif auction_status == 4:\n self.add_label(\"It is time to reveal the bids for this name auction.\")\n reveal_date = self.dapp.ens_auction_contract.reveal_date(self.dapp.name)\n time_left = reveal_date - datetime.now()\n\n if time_left.total_seconds() > 0:\n minutes_left = time_left.total_seconds() // 60\n hours_left = minutes_left // 60\n days_left = round(hours_left / 24, 1)\n if minutes_left < 60:\n self.add_label(\"{} minutes left to reveal your bid.\".format(minutes_left))\n elif hours_left < 24:\n self.add_label(\"{} hours left to reveal your bid.\".format(hours_left))\n else:\n self.add_label(\"{} days left to reveal your bid.\".format(days_left))\n\n self.bid_amount_value = self.add_textbox(\"Bid amount:\")\n self.bid_secret_value = 
self.add_textbox(\"Bid Secret Phrase:\")\n self.add_ok_cancel_buttons(self._reveal_bid_choice, self._cancel, ok_text=\"Reveal Bid\")\n \n def _reveal_bid_choice(self):\n self.dapp.add_transaction_dialog(\n tx_fn=self._reveal_bid_tx,\n destroy_window=self, \n title=\"Reveal Bid\"\n )\n\n def _reveal_bid_tx(self):\n return self.dapp.ens_auction_contract.unsealBid(\n self.dapp.name, \n self.bid_amount_value(), \n self.bid_secret_value())\n\n def _start_auction_choice(self):\n self.dapp.add_transaction_dialog(\n tx_fn=self._send_auction_tx,\n destroy_window=self, \n title=\"Begin Auction\"\n )\n\n def _send_auction_tx(self):\n return self.dapp.ens_auction_contract.start_auction(self.dapp.name)\n\n def _make_bid_choice(self):\n self.dapp.add_transaction_dialog(\n tx_fn=self._make_bid_tx,\n tx_value=self.bidvalue(),\n destroy_window=self, \n title=\"Bid on name '{}'\".format(self.dapp.name)\n )\n\n def _make_bid_tx(self):\n return self.dapp.ens_auction_contract.place_bid(self.dapp.name,\n self.dapp.node.credstick.addressStr(),\n self.bidvalue(),\n self.bid_secret_value())\n\n def _cancel(self):\n self.close()\n self.dapp.quit()\n\n","sub_path":"ens_manager/auction_frame.py","file_name":"auction_frame.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"295080294","text":"from sys import argv\r\n\r\n#script calls an argument defined at execution\r\nscript, user_name = argv\r\n#prompt can be anything as long as it is quoted\r\nprompt = '> '\r\n\r\n\r\n#takes the argument provided and uses the script name\r\n#stores input in the likes variable\r\nprint(f\"Hi {user_name}, I'm the {script} script.\")\r\nprint(\"I'd like to ask you a few questions.\")\r\nprint(f\"Do you like me {user_name}?\")\r\nlikes = input(prompt)\r\n\r\n#stores input in lives variable\r\nprint(f\"Where do you live {user_name}?\")\r\nlives = input(prompt)\r\n\r\n#stores input in computer variable\r\nprint(\"What kind of computer do you have?\")\r\ncomputer = input(prompt)\r\n\r\n#uses style multi-line string and shows all user inputs\r\nprint(f\"\"\"\r\nAlright, so you said {likes} about liking me.\r\nYou live in {lives}. Not sure where that is.\r\nAnd you have a {computer} computer. Nice.\r\n\"\"\")\r\n","sub_path":"ex14.py","file_name":"ex14.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"86465472","text":"__author__ = 'rApeNB'\n# -*- coding: utf-8 -*-\n\n\"\"\"\nOne difference between the threading and multiprocessing examples is the extra\nprotection for __main__ used in the multiprocessing examples. Due to the way\nthe new processes are started, the child process needs to be able to import the\nscript containing the target function. Wrapping the main part of the\napplication in a check for __main__ ensures that it is not run recursively in\neach child as the module is imported. 
Another approach is to import the target\nfunction from a separate script.\n\"\"\"\n\nimport multiprocessing\nimport multiprocessing_import_worker\n\nif __name__ == '__main__':\n jobs = []\n for i in xrange(5):\n p = multiprocessing.Process(\n target=multiprocessing_import_worker.worker)\n jobs.append(p)\n p.start()\n","sub_path":"r_multiprocessing/multiprocessing_import_main.py","file_name":"multiprocessing_import_main.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"440809707","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('service', '0002_auto_20150322_0845'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('item_code', models.CharField(max_length=120)),\n ('item_description', models.CharField(max_length=300)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ItemType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=120)),\n ('description', models.CharField(max_length=300)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.DeleteModel(\n name='Service',\n ),\n migrations.AddField(\n model_name='item',\n name='ItemType',\n field=models.ForeignKey(to='service.ItemType'),\n preserve_default=True,\n ),\n ]\n","sub_path":"service/migrations/0003_auto_20150322_0854.py","file_name":"0003_auto_20150322_0854.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"530056009","text":"import requests\n\nnoMoreHits = 'Inga träffar på slutpriser'\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\nresult = ''\ncontent = ''\npage = 1\nwhile not content.__contains__(noMoreHits) and page < 200:\n result = result + content\n path = f'https://www.hemnet.se/salda/bostader?item_types%5B%5D=bostadsratt&location_ids%5B%5D=898472&page={page}&sold_age=6m'\n response = requests.get(path, headers=headers)\n content = response.text\n page = page + 1\n\nnew_path = 'HemnetSearchResults2.html'\nnew_days = open(new_path, mode='w', encoding='utf-8')\nnew_days.write(result)\n\nprint(\"done\")\n","sub_path":"1Downloader.py","file_name":"1Downloader.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"274665205","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, site, tname\n\nblib.getData()\n\ntemplates = [\n \"pos a\",\n \"pos adj\",\n \"pos adv\",\n \"pos adverb\",\n \"pos n\",\n \"pos noun\",\n \"pos v\",\n \"pos verb\"\n]\n\npos_to_pos = {\n 'a': 'a',\n 'adj': 'a',\n 'adv': 'adv',\n 'adverb': 'adv',\n 'n': 'n',\n 'noun': 'n',\n 'v': 'v',\n 'verb': 'v'\n}\n\ndef process_page(page, index, parsed):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n notes = []\n\n text = str(page.text)\n\n def replace_pos(m):\n return \"%s|pos=%s}}\" % 
(m.group(1), pos_to_pos[m.group(2)])\n\n newtext = re.sub(r\"(\\{\\{l\\|.*?)\\}\\} \\{\\{pos[ _](.*?)\\}\\}\", replace_pos,\n text)\n if newtext != text:\n notes.append(\"move {{pos *}} inside of link\")\n text = newtext\n\n sections = re.split(\"(^==[^=]*==\\n)\", text, 0, re.M)\n\n for j in range(2, len(sections), 2):\n m = re.search(\"^==(.*)==\\n$\", sections[j - 1])\n assert m\n langname = m.group(1)\n if langname not in blib.languages_byCanonicalName:\n langnamecode = None\n else:\n langnamecode = blib.languages_byCanonicalName[langname][\"code\"]\n def replace_raw_pos(m):\n if not langnamecode:\n msg(\"WARNING: Unable to parse langname %s when trying to replace raw link %s\" % (\n langname, m.group(0)))\n return m.group(0)\n return \"\\n* {{l|%s|%s|pos=%s}}\" % (langnamecode, m.group(1), pos_to_pos[m.group(2)])\n newsec = re.sub(r\"\\n\\* \\[\\[([^\\[\\]\\n]*?)\\]\\] \\{\\{pos[ _](.*?)\\}\\}\",\n replace_raw_pos, sections[j])\n if newsec != sections[j]:\n notes.append(\"move {{pos *}} inside of raw link\")\n sections[j] = newsec\n\n text = \"\".join(sections)\n\n return text, notes\n\nparser = blib.create_argparser(\"Move {{pos *}} declarations inside of links\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nfor template in templates:\n msg(\"Processing references to Template:%s\" % template)\n for i, page in blib.references(\"Template:%s\" % template, start, end):\n blib.do_edit(page, i, process_page, save=args.save, verbose=args.verbose)\n","sub_path":"clean_pos_templates.py","file_name":"clean_pos_templates.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458023743","text":"\"\"\"This module is used for main method\"\"\"\nfrom __future__ import print_function\nimport argparse\nimport logging\nfrom src.driver.scientific_calc import ScientificCalc\nlogging.basicConfig(filename='logger_file.log', level=logging.ERROR,\n format='%(asctime)s : %(levelname)s : %(message)s : %(lineno)s', filemode='w')\n\n\ndef main():\n \"\"\"This main method is used to call the scientific operation 10 power x\"\"\"\n try:\n power_x_obj = ScientificCalc()\n parser = argparse.ArgumentParser()\n parser.add_argument('--function', type=str, required=True, nargs='+')\n args = parser.parse_args()\n method_name = args.function\n if method_name[0] == 'cal_power_ten':\n power_val = method_name[1]\n print(power_x_obj.cal_power_ten(power_val))\n\n except ValueError as value_error:\n logging.error(value_error)\n print('Error has occured')\n","sub_path":"src/main/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96823858","text":"# -*- coding: utf-8 -*-\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom balance import BalanceRequired\n\n\nclass CDRRequired(BalanceRequired):\n def page(self):\n success = False\n\n if 'page' in getattr(self.request, self._method):\n\n page = getattr(self.request, self._method)['page']\n try:\n page = int(page)\n except ValueError:\n self._errors['title'] = u'Ошибка приложения'\n self._errors['message'] = u'page не число'\n else:\n success = True\n self._params['page'] = page\n else:\n success = True\n self._params['page'] = 1\n\n return 
success\n","sub_path":"backend/lib/required/cdr.py","file_name":"cdr.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"243878712","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nSAMPLES_PER_SEC = 44100\nDURATION_S = 1\nnum_samples = SAMPLES_PER_SEC * DURATION_S\n\n\ndef get_times(duration_s=1.0, sample_rate=44100):\n ts = np.linspace(0., duration_s, round(duration_s * sample_rate))\n return ts\n\n\n# why do I worry about x values an not just assume 0 to duration * samples\ndef create_sine_wave(duration=1.0, freq=440, amp=1.0):\n ts = get_times(duration)\n samples = amp * np.sin(np.pi * 2 * freq * ts)\n return ts, samples\n\n\ndef normalize_data(data):\n # make sure amplitude is not 0\n max_amp = max(max(data), abs(min(data)))\n normal_data = data / max_amp\n return normal_data\n\n\n# why not do in one step with normalize\ndef quantize_wave(data, samp_rate=44100):\n # make amplitude half of the int16 min/max\n if max(data) > 1:\n data = normalize_data(data)\n # check to see if normalized to -1 to 1\n # keep this close to max rather than .5\n desired_amplitude = np.iinfo(np.int16).max * .5\n samples = np.int16(desired_amplitude * data)\n return samples\n\n\n# need to fix this so data is truncated to duration * sample_rate\ndef plot_wave(duration, data):\n num_samples = round(duration * SAMPLES_PER_SEC)\n ts = np.linspace(0, duration, num_samples)\n plt.plot(ts, data[:num_samples])\n # plt.legend()\n plt.xlabel(\"Time [s]\")\n plt.ylabel(\"Amplitude\")\n plt.show()\n\n\ndef add_waves(wave_array):\n result = sum(wave_array)\n return result\n\n\ndef mod_waves(wave1, mod_wave):\n result = wave1 * mod_wave\n return result\n","sub_path":"waves.py","file_name":"waves.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"36822877","text":"from app.schemas.fields import StringTrim\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\n\n\nclass ScrOcorrenciaEdiSchema(Schema):\n class Meta:\n type_ = 'scr_ocorrencia_edi'\n self_view = 'scr_ocorrencia_edi_detail'\n self_view_kwargs = {'id': ''}\n self_view_many = 'scr_ocorrencia_edi_list'\n\n id = fields.Integer(as_string=True, dump_only=True)\n codigo_edi = fields.Integer(required=True, as_string=True)\n ocorrencia = StringTrim(required=True)\n\n\n","sub_path":"web/app/schemas/scr_ocorrencia_edi_schema.py","file_name":"scr_ocorrencia_edi_schema.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"100885523","text":"from game.strings import strings\nfrom game.properties import *\nfrom game.cards import *\nfrom game.constants import *\nimport random\n\nplayers = []\nclass player():\n def __init__(self, avatar, ai):\n self.money = 1500\n self.properties = []\n self.position = 0\n self.avatar = avatar\n self.double_die = 0\n self.ai = ai\n self.bid = 0\n self.get_out_of_jail = 0\n self.in_jail = False\n self.is_bankrupt = False\n\n def buy(self, property, bid = None):\n if bid == None:\n cost = property.cost\n else:\n cost = bid\n #auction if not purchased\n if self.money >= cost:\n self.money -= cost\n self.properties.append(property) #this looks useless after adding owners\n property.owner = self\n #probably need to remove it from the bank's pool\n #need a better way, this also removes the ability to check 
the current tile\n else:\n print(strings(\"no_money\"))\n\n def roll_die(self, s_util = None):\n self.die1 = random.randint(1, 6)\n self.die2 = random.randint(1, 6)\n #double test\n #self.die1 = 2\n #self.die2 = 2\n print(strings(\"roll_die\") % (self.avatar, self.die1, self.die2, (self.die1 + self.die2)))\n if s_util == None:\n #check for doubles, etc\n if self.double_die >= 2:\n print(strings(\"roll_3_d\") % self.avatar)\n self.position = tiles[10].position #jail\n self.in_jail = True\n #again, replace this with a going to jail function\n self.double_die = 0\n else:\n self.move(self.die1 + self.die2, True)\n if self.die1 == self.die2:\n #needs to purchase/pay rent etc before rerolling\n #we are doing the purchases with .move() so it should be alright\n print(strings(\"roll_d\") % self.avatar)\n self.double_die += 1\n #shouldn't roll by itself again\n #but how to manage ai?\n if self.ai:\n #self.roll_die()\n #not really this ^\n #but something else\n pass\n if self.die1 != self.die2:\n self.double_die = 0\n else:\n return (self.die1 + self.die2)\n\n def move(self, position, die = False):\n if die:\n self.position += position\n if self.position >= 40:\n self.money += GO_SALARY\n self.position %= 40\n print(strings(\"move_pass_GO\")% (self.avatar, GO_SALARY))\n else:\n self.position = position\n self.cur_tile = tiles[self.position]\n #checks for special positions here\n #stations and utils also can be auctioned\n if self.cur_tile in availables:\n print(\"it's a prop\")\n if self.ai:\n #ai stuff\n pass\n else:\n #it's owned by another player\n if self.cur_tile.owner != None and self.cur_tile.owner != self:\n print(\"already owned\")\n self.pay_rent(self.cur_tile)\n else:\n print(strings(\"move_buy_prop\") % (self.position, self.cur_tile.name, self.cur_tile.cost))\n while True:\n i = input().lower()\n if i == \"y\":\n self.buy(self.cur_tile)\n break\n elif i == \"n\":\n self.auction(self.cur_tile)\n break\n else:\n print(strings(\"err_yn\"))\n elif self.cur_tile in decks:\n print(\"it's a chance\")\n if self.cur_tile.name == \"Chance\":\n self.chance(True)\n else:\n self.chance(False)\n elif self.cur_tile in taxes:\n if self.money >= self.cur_tile.cost:\n self.money -= self.cur_tile.cost\n print(strings(\"move_pay_tax\") % (self.avatar, self.cur_tile.cost, self.cur_tile.name))\n else:\n #mortgage stuff here\n #probably mortgage_or_sell_house?\n #print(strings(\"no_money\"))\n pass\n elif self.cur_tile in miscs:\n if self.cur_tile == miscs[0]:\n pass\n elif self.cur_tile == miscs[1]:\n print(strings(\"move_visit_jail\") % self.avatar)\n elif self.cur_tile == miscs[2]:\n print(self.cur_tile.name)\n elif self.cur_tile == miscs[3]:\n self.cc_go_to_jail() #might not be correct way to do it\n #make a separate go to jail function to also get 3 double dies?\n\n def pay_rent(self, property):\n if property in props:\n houses = list(map(int, property.houses.split(\" \")))\n if property.house_count >= 1:\n rent = houses[property.house_count-1]\n print(strings(\"rent_house\") % (property.name, property.house_count, rent))\n elif property.has_hotel:\n rent = property.hotel\n print(strings(\"rent_hotel\")% (property.name, rent))\n else:\n rent = property.rent\n print(strings(\"rent_none\") % (property.name, rent))\n elif property in stations:\n station_count = 0\n for f in stations:\n if f.owner == property.owner:\n station_count += 1\n if station_count == 4:\n rent = 200\n elif station_count == 3:\n rent = 100\n elif station_count == 2:\n rent = 50\n else:\n rent = 25\n print(strings(\"rent_station\") % 
(property.owner.avatar, station_count, rent))\n else: #elif property in utils:\n #4xdice if 1 owned, 10xdice if 2.\n util_count = 0\n for f in utils:\n if f.owner == property.owner:\n util_count += 1\n if util_count == 2:\n print(strings(\"rent_util_2\") % property.owner.avatar)\n rent = self.roll_die(True) * 10\n else:\n print(strings(\"rent_util_1\") % property.owner.avatar)\n rent = self.roll_die(True) * 4\n if self.money >= rent:\n self.money -= rent\n property.owner.money += rent\n print(strings(\"rent_pay\") % (self.avatar, property.owner.avatar, rent))\n else:\n print(strings(\"rent_fail\") % (self.avatar))\n #mortgage etc. here\n\n def auction(self, property):\n print(strings(\"auction\") % (self.avatar, property.name))\n self.auction_bid = 1\n #self.ai_bids = 0 #do an increase with each turn?\n #while True:\n for p in players[1:]: #ai bidding\n self.auction_bid = random.randint(self.auction_bid, (p.money/5)) #1/4th is low?\n p.bid = self.auction_bid\n #this is flawed because as long as the last robot has money, it will get the property\n #instead do randomize bids without giving a low limit of auction_bid to solve?\n print(strings(\"auc_bids\"))\n for p in players[1:]:\n print(strings(\"auc_list\") % (p.avatar, p.bid))\n print(strings(\"auc_or_s\") % (self.auction_bid, self.money)) #probably need to check if the player has more money here.\n while True:\n try:\n str = input()\n try:\n self.bid = int(str)\n except ValueError:\n pass\n if str == \"s\":\n #selling to the highest bidder here\n for p in players:\n if self.auction_bid == p.bid:\n p.buy(property, p.bid)\n print(strings(\"auc_win\") % (p.avatar, p.bid))\n break\n elif self.bid > self.auction_bid and self.bid <= self.money:\n #buying self here\n print(strings(\"auc_win\") % (self.avatar, self.bid))\n self.buy(property, self.bid)\n break\n else:\n print(strings(\"auc_or_s\") % (self.auction_bid, self.money))\n except ValueError:\n print(strings(\"auc_or_s\") % (self.auction_bid, self.money))\n def choose_mortgage(self, fee):\n #ask if they want to sell houses or mortgage first\n print(strings(\"mort_need\") % (self.avatar, self.money, fee))\n #maybe put above to move?\n if len(self.properties) > 0:\n print(strings(\"mort_cho\"))\n print(strings(\"mort_list_title\"))\n temp_list = []\n x = 0\n for p in self.properties:\n if not p.is_mortgaged:\n x += 1\n print(strings(\"mort_list\") % (x, (p.cost/2), p.colour, p.name))\n temp_list.append(p)\n while True:\n try:\n s = int(input())\n self.mortgage(temp_list[s-1], fee)\n break\n except (ValueError, IndexError):\n print(strings(\"err_n\") % (len(temp_list)))\n else:\n self.bankruptcy()\n def mortgage(self, property, fee):\n #need to sell houses? 
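(added note: under the official rules yes, all houses on that colour group must be sold back to the bank first)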
before mortgaging\n property.is_mortgaged = True\n self.money += property.cost/2\n if self.money >= fee:\n print(strings(\"mort_full\") % (self.avatar, property.name, property.cost/2, fee))\n self.money -= fee\n else:\n print(strings(\"mort_some\") % (self.avatar, property.name, property.cost/2))\n leftover = fee - self.money\n self.money = 0\n self.choose_mortgage(leftover)\n def bankruptcy(self):\n print(strings(\"bankrupt\") % self.avatar)\n self.is_bankrupt = True\n #add an if not is_bankrupt switch to every action or remove them from player pool?\n #maybe won't be necessary after i implement turn() function and can check there\n def build_house(self, property):\n #browns, light blues $50\n #pinks, oranges $100\n #reds, yellows $150\n #greens, dark blues $200\n #a hotel cost same as a house, but needs 4 built\n #also limit the max amount of houses in games.py\n #in total: 32 houses, 12 hotels\n #they also need to build houses evenly amongst all properties of a colour (new houses going to the one with the least amount first)\n #instead of this individual build style, only get colour and house count?\n c = property.colour\n #check if they own all colours\n temp_list = []\n for p in props:\n if p.colour == c:\n temp_list.append(p)\n owns = 0\n for p in temp_list:\n if p.owner == self:\n owns += 1\n if owns == len(temp_list):\n if c == \"brown\" or c == \"light blue\":\n cost = 50\n elif c == \"pink\" or c == \"orange\":\n cost = 100\n elif c == \"red\" or c == \"yellow\":\n cost = 150\n else: #green, dark blue\n cost = 200\n built = property.house_count\n global_houses = 0\n for p in props:\n global_houses += p.house_count\n available_houses = MAX_HOUSES - global_houses\n allowed = available_houses if available_houses < 4 else 4\n if built == 4:\n print(strings(\"house_max\") % property.name)\n else:\n if built == 0:\n print(strings(\"house_none\") % property.name)\n else:\n print(strings(\"house_some\") % (property.name, built))\n print(strings(\"house_question\"))\n permitted = 0\n if allowed > built: #4-0, 4; 3-0, 3; 4-3, 1 etc.\n permitted = allowed - built\n else: #allowed <= built\n if allowed == 3: #3-3, 1\n permitted = 1\n elif allowed == 2: #2-2, 2\n permitted = 2\n elif allowed == 1: #1-1, 1\n permitted = 1\n while True:\n try:\n s = int(input())\n if s <= permitted:\n total_cost = s * cost\n if self.money >= total_cost:\n self.money -= total_cost\n property.house_count += s\n print(strings(\"house_build\") % (self.avatar, total_cost, s, property.name))\n print(strings(\"house_total\") % property.house_count)\n break\n else:\n print(strings(\"no_money\"))\n break\n else:\n raise ValueError()\n except ValueError:\n print(strings(\"err_n\") % permitted)\n else:\n print(strings(\"house_colours\") % (self.avatar, property.colour))\n def build_hotel(self, property):\n pass\n def ai_turn(self):\n print(\"%s is rolling...\" % self.avatar)\n self.temp_pos = self.position\n self.roll_die()\n print(\"%s is moving from %d to %d.\" % (self.avatar, self.temp_pos, self.position))\n self.cur_tile = next((p for p in props if p.position == self.position), None)\n #if none check others here?\n #props, decks, stations, utils, taxes, miscs\n if self.cur_tile is None:\n self.cur_tile = next((p for p in props if p.position == self.position), None)\n print(\"%d is %s.\" % (self.position, self.cur_tile.name))\n print(\"%s has $%d, %s costs $%d.\" % (self.avatar, self.money, self.cur_tile.name, self.cur_tile.cost))\n #if can afford 110% of the cost\n if (self.money * 1.1) >= self.cur_tile.cost:\n 
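# added note: as written this lets the AI attempt purchases costing up to 110% of its cash; to match the comment above (afford 110% of the cost) the test would be self.money >= 1.1 * self.cur_tile.cost\n            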
print(\"%s decides to buy %s.\" % (self.avatar, self.cur_tile.name))\n self.buy(self.cur_tile)\n else:\n print(\"%s decides to not buy %s. It's in auction now.\" % (self.avatar, self.cur_tile.name))\n def chance(self, c_or_cc):\n if c_or_cc: #chance\n cur_card = chances[0]\n chances.append(chances.pop(0)) #puts the card to the end of the list\n print(cur_card.name)\n print(cur_card.desc)\n self.id_chance(cur_card.id)\n else: #community chest\n cur_card = chests[0]\n chests.append(chests.pop(0))\n print(cur_card.name)\n print(cur_card.desc)\n self.id_chest(cur_card.id)\n def id_chance(self, id):\n if id == 0:\n self.cc_advance_to_go()\n elif id == 1:\n self.c_advance_to_x(24)\n elif id == 2:\n self.c_advance_to_x(11)\n elif id == 3:\n self.c_advance_to_util()\n elif id == 4 or id == 5:\n self.c_advance_to_station()\n elif id == 6:\n self.cc_collect_money(50)\n elif id == 7:\n self.cc_get_out_of_jail()\n elif id == 8:\n self.c_go_back_3_spaces()\n elif id == 9:\n self.cc_go_to_jail()\n elif id == 10:\n self.cc_pay_for_houses_hotels(25, 100)\n elif id == 11:\n self.cc_pay_money(15)\n elif id == 12:\n self.c_advance_to_x(5)\n elif id == 13:\n self.c_advance_to_x(39)\n elif id == 14:\n self.c_pay_players(50)\n elif id == 15:\n self.cc_collect_money(150)\n elif id == 16:\n self.cc_collect_money(100)\n def id_chest(self, id):\n if id == 0:\n self.cc_advance_to_go()\n elif id == 1:\n self.cc_collect_money(200)\n elif id == 2 or id == 11 or id == 12:\n self.cc_pay_money(50)\n elif id == 3:\n self.cc_collect_money(50)\n elif id == 4:\n self.cc_get_out_of_jail()\n elif id == 5:\n self.cc_go_to_jail()\n elif id == 6:\n self.cc_collect_from_players(50)\n elif id == 7 or id == 10 or id == 16:\n self.cc_collect_money(100)\n elif id == 8:\n self.cc_collect_money(20)\n elif id == 9:\n self.cc_collect_from_players(10)\n elif id == 13:\n self.cc_collect_money(25)\n elif id == 14:\n self.cc_pay_for_houses_hotels(40, 115)\n elif id == 15:\n self.cc_collect_money(10)\n ##community chests\n def cc_advance_to_go(self):\n self.move(0)\n def cc_collect_money(self, money):\n self.money += money\n def cc_pay_money(self, money):\n if self.money >= money:\n self.money -= money\n else:\n pass\n #mortgage etc. here\n def cc_get_out_of_jail(self):\n self.get_out_of_jail += 1\n def cc_go_to_jail(self):\n self.position = 10\n self.in_jail = True\n def cc_collect_from_players(self, money):\n temp_list = players.copy()\n temp_list.remove(self)\n for p in temp_list:\n if p.money >= money:\n self.money += money\n p.money -= money\n else:\n pass\n #mortgage etc. here\n def cc_pay_for_houses_hotels(self, house_cost, hotel_cost):\n house_count = 0\n hotel_count = 0\n fee = 0\n for p in props:\n if p.owner == self:\n house_count += p.house_count\n if p.has_hotel:\n hotel_count += 1\n fee += (house_count * house_cost) + (hotel_count * hotel_cost)\n print(strings(\"cc_hh_count\") % (self.avatar, house_count, hotel_count))\n print(strings(\"cc_hh_fee\") % fee)\n if self.money >= fee:\n self.money -= fee\n else:\n #mortgage etc. here\n pass\n ##chances\n def c_advance_to_x(self, x):\n if self.position > x: #If you pass Go, collect $200.\n self.money += GO_SALARY\n self.move(x) #24 Trafalgar, 11 Pall, 5 Kings Cross, 39 Mayfair\n def c_advance_to_util(self):\n #Advance token to nearest Utility. If unowned, you may buy it from the Bank. 
If owned, throw dice and pay owner a total 10 times the amount thrown.\n if self.position > 12 and self.position < 28:\n self.move(28)\n else:\n self.move(12) #TODO: add special cases for 10x throws\n #check the rules for what happens if they are on the tile\n #same goes for the below one\n def c_advance_to_station(self):\n #Advance token to the nearest Railroad and pay owner twice the rental to which he/she is otherwise entitled. If Railroad is unowned, you may buy it from the Bank.\n #2 of these ^\n if self.position > 35 and self.position < 5:\n self.move(5)\n elif self.position > 5 and self.position < 15:\n self.move(15)\n elif self.position > 15 and self.position < 25:\n self.move(25)\n else:\n self.move(35) #TODO: add special cases for 10x throws\n def c_go_back_3_spaces(self):\n self.move((self.position - 3) % 40)\n def c_pay_players(self, money):\n temp_list = players.copy()\n temp_list.remove(self)\n fee = money * len(temp_list)\n if self.money >= fee:\n for p in temp_list:\n p.money += money\n self.money -= fee\n else:\n #mortgage etc. here\n pass\n","sub_path":"game/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":20001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"392680470","text":"import glob\nimport os\nimport cv2\n\n\ndef main():\n\n #os.makedirs('fliped',exist_ok=True)\n img_list = glob.glob('./image_dir/*.jpg')\n\n # Flip images and save\n for path in img_list:\n name, ext = os.path.splitext(os.path.basename(path))\n img = cv2.imread(path, cv2.IMREAD_COLOR)\n flip = cv2.flip(img, 1)\n cv2.imwrite('./image_dir/fliped_{}.jpg'.format(name), flip)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pycatfd/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"344847111","text":"#!/usr/bin/env python3\n# vim: set ts=4 sw=4 et sts=4 ai:\n\n\"\"\"\nTool to figure find the USB device that a board is available at.\n\nThis is a complicated process as the FX2 is software configurable and hence\ncould end up under many different VID:PID names based on what firmware is\ncurrently loaded onto it.\n\"\"\"\n\nimport logging\nimport os\nimport os.path\nimport sys\nimport time\nimport subprocess\n\nimport argparse\n\nfrom collections import namedtuple\n\nfrom . import boards\nfrom . 
import __version__\n\n\ndef args_parser(board, mode):\n \"\"\"Create an parser for the arguments.\"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n\n # , aliases=['--debug', '-d'])\n parser.add_argument(\n '--verbose',\n '-v',\n action='count',\n default=0,\n help='Output more information.')\n\n parser.add_argument(\n '--version',\n action='store_true',\n help='Output version information for *modeswitch*.')\n\n parser.add_argument(\n '--by-type',\n choices=boards.BOARD_TYPES,\n help='Find board with a given type.')\n\n parser.add_argument(\n '--by-mac',\n help='Find board with the given MAC address.')\n parser.add_argument(\n '--by-dna',\n help='Find board with the given Device DNA.')\n parser.add_argument(\n '--by-position',\n help=\"\"\"\\\nFind board using a given position in the USB structure.\n\nExample:\n 1-2.3 - Bus 1, Port 2 (which is a hub), Port 3\n 5-6.7.8 - Bus 5, Port 2 (which is a hub), Port 7 (which is a hub), Port 8 (which is a hub)\n\nWhile this *should* be static across reboots, but sadly on some machines it isn't :(\n\"\"\") # noqa\n parser.add_argument(\n '--by-mode',\n help=argparse.SUPPRESS) # help='Find board in a given mode.', )\n\n parser.add_argument(\n '--all',\n help=\"\"\"\\\nDo operation on all boards, otherwise will error if multiple boards are found.\n\"\"\")\n\n parser.add_argument(\n '--get-usbfs',\n action='store_true',\n help='Return the /dev/bus/usb path for a device.')\n parser.add_argument(\n '--get-sysfs',\n action='store_true',\n help='Return the /sys/bus/usb/devices path for a device.')\n parser.add_argument(\n '--get-state',\n action='store_true',\n help='Return the state the device is in. Possible states are: %r' %\n boards.BOARD_STATES)\n parser.add_argument(\n '--get-video-device',\n action='store_true',\n help='Get the primary video device path.')\n parser.add_argument(\n '--get-serial-device',\n action='store_true',\n help='Get the serial device path.')\n\n parser.add_argument(\n '--prefer-hardware-serial',\n help='Prefer the hardware serial port on the Atlys board.')\n\n parser.add_argument(\n '--mode',\n help='Switch mode to given state.',\n choices=boards.BOARD_STATES)\n\n parser.add_argument(\n '--flash-image',\n help='Flash a combined gateware+bios+firmware onto the SPI flash.')\n # FPGA\n parser.add_argument(\n '--load-gateware',\n help='Load gateware onto the FPGA.')\n parser.add_argument(\n '--flash-gateware',\n help='Flash gateware onto the SPI flash which the FPGA boots from.')\n parser.add_argument(\n '--reset-gateware',\n action='store_true',\n help='Reset gateware currently running on the FPGA.')\n # Cypress FX2\n parser.add_argument(\n '--load-fx2-firmware',\n help='Load firmware file onto the Cypress FX2.')\n parser.add_argument(\n '--flash-fx2-eeprom',\n help=\"\"\"\\\nFlash the FX2 eeprom with data. 
Requires dfu-util and a DFU capable firmware\nrunning on FX2.\n \"\"\")\n # SoftCPU inside the FPGA gateware\n parser.add_argument(\n '--flash-softcpu-bios',\n help=\"\"\"\\\nFlash the bios file for the Soft-CPU onto the SPI flash.\n\"\"\")\n parser.add_argument(\n '--load-softcpu-firmware',\n help=\"\"\"\\\nLoad firmware file onto the Soft-CPU running inside the FPGA.\n\"\"\")\n parser.add_argument(\n '--flash-softcpu-firmware',\n help=\"\"\"\\\nFlash the firmware file for the Soft-CPU onto the SPI flash.\n\"\"\")\n parser.add_argument(\n '--clear-softcpu-firmware',\n action='store_true',\n help=\"\"\"\\\nClear the firmware file for the Soft-CPU on the SPI flash.\n\"\"\")\n # Create aliases for old lm32 name of the softcpu.\n for action in list(parser._actions):\n aliases = set()\n for option in action.option_strings:\n if 'softcpu' in option:\n aliases.add(option.replace('softcpu', 'lm32'))\n if aliases:\n parser.add_argument(\n *aliases,\n dest=action.dest,\n help=argparse.SUPPRESS)\n\n parser.add_argument(\n '--timeout',\n help='How long to wait in seconds before giving up.',\n type=float)\n\n return parser\n\n\ndef find_boards(args):\n all_boards = boards.find_boards(verbose=args.verbose)\n\n # Filter out the boards we don't care about\n filtered_boards = []\n for board in all_boards:\n if args.verbose > 0:\n sys.stderr.write(\"%s in '%s' mode at %s\\n\" % (\n boards.BOARD_NAMES[board.type],\n board.state,\n board.dev.path,\n ))\n for sp in board.dev.syspaths:\n sys.stderr.write(\" %s\\n\" % (sp,))\n\n if board.dev.inuse():\n sys.stderr.write(\n \" Board is currently used by drivers %s\\n\" %\n (board.dev.drivers(),))\n\n if board.tty():\n sys.stderr.write(\" Serial port at %s\\n\" %\n \", \".join(board.tty()))\n\n if args.by_type and args.by_type != board.type:\n if args.verbose > 0:\n sys.stderr.write(\" Ignore as not type %s\\n\" % (args.by_type,))\n continue\n\n filtered_boards.append(board)\n\n return filtered_boards\n\n\ndef switch_mode(args, board, newmode):\n if newmode == \"jtag\":\n # Works on all boards\n pass\n\n elif newmode in (\"serial\", \"eeprom\"):\n assert board.type == \"opsis\", (\n \"{} mode only valid on the opsis.\".format(newmode))\n\n elif newmode == \"operational\":\n raise NotImplemented(\"Not yet finished...\")\n else:\n raise NotImplemented(\"Unknown mode {}\".format(newmode))\n\n if board.state != newmode:\n if args.verbose:\n sys.stderr.write(\n \"Going from {} to {}\\n\".format(board.state, newmode))\n\n old_board = board\n boards.load_fx2(\n old_board, mode=newmode, verbose=args.verbose)\n\n starttime = time.time()\n while True:\n found_boards = find_boards(args)\n\n found_board = None\n for new_board in found_boards:\n if new_board.type == old_board.type:\n if new_board.state == old_board.state:\n continue\n assert new_board.state == newmode\n found_board = new_board\n break\n else:\n time.sleep(1)\n\n if found_board:\n board = found_board\n break\n\n if (args.timeout and starttime - time.time() > args.timeout):\n raise SystemError(\"Timeout!\")\n\n if args.verbose:\n sys.stderr.write(\"Board was {!r}\\n\".format(old_board))\n sys.stderr.write(\"Board now {!r}\\n\".format(board))\n else:\n if args.verbose:\n sys.stderr.write(\n \"Board already in required mode ({!s})\\n\".format(\n board.state))\n\n return board\n\n\ndef main():\n # Parse the command line name\n cmd = os.path.basename(sys.argv[0])\n if cmd.endswith('.py'):\n cmd = cmd.rsplit('.', 1)[0]\n\n board, mode = cmd.split('-', 1)\n boards.assert_in(board, boards.BOARD_TYPES + ['hdmi2usb'])\n 
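# added note: the launcher name is expected to be <board>-<mode>, e.g. an opsis-mode-switch entry point selects the opsis board in mode-switch mode\n    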
POSSIBLE_MODES = ['find-board', 'mode-switch', 'manage-firmware']\n boards.assert_in(mode, POSSIBLE_MODES)\n\n args = args_parser(mode, board).parse_args()\n\n if args.version:\n print(__version__)\n return\n\n if board != \"hdmi2usb\":\n args.by_type = board\n if args.by_type:\n boards.assert_in(args.by_type, boards.BOARD_TYPES)\n\n found_boards = find_boards(args)\n if not args.all:\n assert len(found_boards) == 1, found_boards\n\n MYDIR = os.path.dirname(os.path.abspath(__file__))\n if args.verbose:\n sys.stderr.write(\"My root dir: %s\\n\" % MYDIR)\n\n # The mode-switch commands will switch modes automatically.\n if mode == 'mode-switch':\n assert len(found_boards) == 1\n board = found_boards[0]\n\n if not args.mode and (\n args.load_gateware\n or args.flash_gateware\n or args.reset_gateware\n or args.flash_softcpu_bios\n or args.flash_softcpu_firmware\n or args.clear_softcpu_firmware\n or args.flash_image):\n args.mode = 'jtag'\n\n # FIXME: Hack to work around issue on the FX2.\n # if args.mode == 'jtag' and board.type == 'opsis':\n # board = switch_mode(args, board, 'serial')\n # board = switch_mode(args, board, 'jtag')\n # board = switch_mode(args, board, 'serial')\n\n if args.mode:\n # Switch modes\n board = switch_mode(args, board, args.mode)\n\n found_boards = find_boards(args)\n for board in found_boards:\n # Load firmware onto the fx2\n if args.load_fx2_firmware:\n boards.load_fx2(\n board, filename=args.load_fx2_firmware,\n verbose=args.verbose)\n\n # First, load DFU capable bootloader, then flash the firmware\n elif args.flash_fx2_eeprom:\n board = boards.load_fx2_dfu_bootloader(board, verbose=args.verbose)\n boards.flash_fx2(board, filename=args.flash_fx2_eeprom,\n verbose=args.verbose)\n\n # Load gateware onto the FPGA\n elif args.load_gateware:\n boards.load_gateware(\n board, args.load_gateware, verbose=args.verbose)\n\n # Flash the gateware into the SPI flash on the board.\n elif args.flash_gateware:\n boards.flash_gateware(\n board, args.flash_gateware, verbose=args.verbose)\n\n # Reset the gateware running on the board.\n elif args.reset_gateware:\n boards.reset_gateware(\n board, verbose=args.verbose)\n\n # Flash the gateware into the SPI flash on the board.\n elif args.flash_softcpu_bios:\n boards.flash_bios(\n board, args.flash_softcpu_bios, verbose=args.verbose)\n\n # Load firmware onto the SoftCPU inside the FPGA\n elif args.load_softcpu_firmware:\n if board.type == \"opsis\":\n assert board.state == \"serial\"\n assert board.tty\n print(\"flterm something....\")\n raise NotImplemented(\"Not yet finished...\")\n\n # Flash the firmware into the SPI flash on the board.\n elif args.flash_softcpu_firmware:\n boards.flash_firmware(\n board, args.flash_softcpu_firmware, verbose=args.verbose)\n\n # Clear the firmware into the SPI flash on the board.\n elif args.clear_softcpu_firmware:\n boards.flash_firmware(\n board, filename=None, verbose=args.verbose)\n\n # Flash an image with gateware+bios+firmware into the SPI flash on the\n # board.\n elif args.flash_image:\n boards.flash_image(\n board, args.flash_image, verbose=args.verbose)\n\n found_boards = find_boards(args)\n\n for board in found_boards:\n if not (args.get_usbfs\n or args.get_sysfs\n or args.get_video_device\n or args.get_serial_device):\n print(\"Found %s boards.\" % len(found_boards))\n break\n\n if args.get_usbfs:\n print(board.dev.path)\n\n if args.get_sysfs:\n print(\"\\n\".join(board.dev.syspaths))\n\n if args.get_state:\n print(board.state)\n\n if args.get_video_device:\n assert board.state == 
\"operational\"\n print(\"???\")\n\n if args.get_serial_device:\n print(board.tty()[0])\n","sub_path":"hdmi2usb/modeswitch/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":12400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"111279636","text":"# coding:utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport mysql.connector\n\n'''\n说明:\n该网站使用Ajax获取拼音分类的子类链接类似:\n http://www.airitilibrary.cn/Search/ScienceJournalsSecond?pID=K&type=PhoneticsSubject&X-Requested-With=XMLHttpRequest\n'''\n\nHEADER = {\n 'Accept-Encoding': 'zh-CN,zh;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',\n}\n\n\n# 第一步 首要任务是采集 拼音分类中的全部分类 存储在一个map中根据分类中的各个子类获取页面\n\n# 第二步,获取每个分类下面文章的超链接\n\n# 第三步,根据超链接获取详细信息,存在数据库\n\ndef get_html(url=''):\n while True:\n try:\n resp = requests.get(url, headers=HEADER)\n if resp.status_code == 200 and resp:\n break\n except Exception:\n print('连接失败,正在重试。')\n return resp.text\n\n\n# 下载html页面\ndef download_html(url,fileName):\n htm = get_html(url)\n htm = htm.encode('gbk','ignore')\n htm = str(htm,'gbk')\n create_html(fileName,htm)\n\n\ndef create_html(fileName, text):\n fp = open(fileName + '.html', 'w')\n fp.writelines(text)\n #fp.write(text)\n fp.close()\n\n\n# 第一步:\ndef get_parent_category(html=''):\n '''\n 获取该网站下面拼音分类的大分类\n :return: 由一级分类为key的map\n '''\n result = {}\n soup = BeautifulSoup(html, 'html.parser')\n aTagList = soup.select('#divLeftMenuC4 ul li a')\n for name in aTagList:\n key = (name.string).strip()\n result[key] = []\n return result\n\n\ndef get_child_category(data={}):\n '''\n 根据字典中的key查找二级分类\n :param data:\n :return: {'A': ['Ab', 'Ac', 'Ad'], 'B': ['Ba', 'Bb', 'Bc']}\n '''\n result = {}\n\n for key in data:\n url = ''.join(['http://www.airitilibrary.cn/Search/ScienceJournalsSecond?pID=',\n key,\n '&type=PhoneticsSubject&X-Requested-With=XMLHttpRequest',\n ])\n html = get_html(url)\n soup = BeautifulSoup(html, 'html.parser')\n aTagList = soup.find_all('a')\n tmp = []\n for child in aTagList:\n tmp.append(child.string[:2])\n result[key] = tmp\n return result\n\n\ndef url_producer(result={}):\n '''\n 生成每个具体分类的url\n :return:\n '''\n for key in result:\n for value in result[key]:\n yield ''.join(['http://www.airitilibrary.cn/Search/ScienceJournals?FirstID=',\n key,\n '&SecondID=',\n value,\n '&type=PhoneticsSubject&changeColor=C',\n value,\n ])\n\n\ndef get_publications_list(result={}):\n '''\n 生成每个具体分类下面的每个页面的url(包含多个页面)\n :return:\n '''\n for url in url_producer(result):\n html = get_html(url)\n soup = BeautifulSoup(html, 'html.parser')\n page = (soup.select('.goPage > span')[0].string).strip()\n num_regepex = re.compile('\\d{1,}')\n page_num = re.findall(num_regepex, page)[0]\n for i in range(1, int(page_num) + 1):\n yield ''.join([\n url,\n '&page=',\n str(i)\n ])\n\n\n# 第二步 获取每页下面出版期刊的链接\ndef get_detail_url(html=''):\n '''\n 生成每个期刊的URI\n '''\n soup = BeautifulSoup(html, 'html.parser')\n tdTagList = soup.find_all('td', class_='tiBo')\n for tdTag in tdTagList:\n # print(tdTag)\n aTag = tdTag.find('a')\n yield aTag['href']\n\n\n# 第三步 详情页\ndef get_detail(html=''):\n soup = BeautifulSoup(html, 'html.parser')\n\n # 获取期刊名\n journal_name = []\n media_c = ''\n media_e = ''\n aTag = soup.select('.publicationsDown > .publicationsTopleft > a')\n for name in aTag:\n if name.string is None:\n continue\n journal_name.append((name.string).strip())\n media_c_regepx = 
re.compile(r'(\\d+[\\u4e00-\\u9fa5]+)|([\\u4e00-\\u9fa5])')\n flag = re.findall(media_c_regepx, journal_name[0])\n if len(journal_name) != 2:\n if flag:\n media_c = journal_name[0]\n else:\n media_e = journal_name[0]\n else:\n if flag:\n media_c = journal_name[0]\n media_e = journal_name[1]\n else:\n media_c = journal_name[1]\n media_e = journal_name[0]\n\n # 获取issn\n pTag = soup.select('.bookInformation .content p')\n doi_num = ''\n issn_num = ''\n if len(pTag) == 2:\n issn_regepx = re.compile(r'\\w{4,}-\\w{4,}')\n issn = re.findall(issn_regepx, pTag[0].string)\n if len(issn) != 0:\n issn_num = issn[0]\n # 获取DOI\n doi_regepx = re.compile(r'10.\\w*/\\w*')\n doi = re.findall(doi_regepx, ''.join(pTag[1].strings))\n if len(doi) != 0:\n doi_num = doi[0]\n else:\n issn_regepx = re.compile(r'\\w{4,}-\\w{4,}')\n issn = re.findall(issn_regepx, pTag[0].string)\n if len(issn) != 0:\n issn_num = issn[0]\n\n # 获取出版社\n publish_organ = soup.find_all('div', class_='officiallyRegistered')\n publishtion_name = (publish_organ[1].select(\n '.basicInformation > p')[0].string).strip()\n\n return media_c, media_e, issn_num, doi_num, publishtion_name\n\n\ndef insert2db_detail():\n conn = mysql.connector.connect(user='root', password='', database='work')\n cursor = conn.cursor()\n sql_1 = 'select uri from detail_uri'\n\n cursor.execute(sql_1)\n sql_2 = 'insert into dummy (publication_id,pissn,media_c,media_e,publish_organ,doi)VALUES(%s,%s,%s,%s,%s,%s)'\n rows = cursor.fetchall()\n for uri in rows:\n print(uri)\n publication_id = uri[0].split('/')[-1]\n cursor.execute('select * from dummy WHERE publication_id=%s', [publication_id])\n if cursor.fetchone():\n continue\n url = 'http://www.airitilibrary.cn' + uri[0]\n html = get_html(url)\n media_c, media_e, issn, doi, publish_organ = get_detail(html)\n cursor.execute(sql_2, [publication_id, issn, media_c, media_e, publish_organ, doi])\n conn.commit()\n conn.close()\n print('Done!!')\n\n\nif __name__ == '__main__':\n htm = get_html('http://www.airitilibrary.cn/Search/ScienceJournals')\n data = get_parent_category(htm)\n result = get_child_category(data)\n for url in get_publications_list(result):\n fileName = './html/'+url[len(url)-9:]\n download_html(url,fileName)\n print('Download '+ fileName + ' Successfully!!')\n\n\n","sub_path":"python/work/brary.py","file_name":"brary.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"354872998","text":"#!/usr/bin/python3.6\n\nimport os, sys\nimport os.path as osp\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import MultiStepLR\nimport torch.backends.cudnn as cudnn\n\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\n\nimport logging\nimport numpy as np\nimport random\nimport time\nimport datetime\nimport pprint\nfrom easydict import EasyDict as edict\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport pretrainedmodels\nfrom utils import create_logger\n\nfrom data_loader import get_data_loader\n\nmodel_names = sorted(name for name in models.__dict__\n if name.islower() and not name.startswith(\"__\")\n and callable(models.__dict__[name]))\n\ncudnn.benchmark = True\n\ntimestamp = datetime.datetime.now().strftime(\"%y-%m-%d-%H-%M\")\n\nif len(sys.argv) != 6:\n print(f'usage: {sys.argv[0]} /path/to/model.pk /path/to/test/ ')\n sys.exit()\n\n\ncfg = edict()\n\ncfg.ROOT_DIR = \"..\"\ncfg.EXPERIMENT_DIR = 
osp.join(cfg.ROOT_DIR, 'models')\nif not osp.exists(cfg.EXPERIMENT_DIR):\n os.makedirs(cfg.EXPERIMENT_DIR)\n\ncfg.DATASET = edict()\ncfg.DATASET.NUM_CLASSES = 340\n\n\nopt = edict()\n\nopt.MODEL = edict()\nopt.MODEL.PRETRAINED = True\nopt.MODEL.IMAGE_SIZE = int(sys.argv[4])\nopt.MODEL.INPUT_SIZE = int(sys.argv[4])\n\nopt.EXPERIMENT = edict()\nopt.EXPERIMENT.CODENAME = 'predict'\nopt.EXPERIMENT.TASK = 'test'\nopt.EXPERIMENT.DIR = osp.join(cfg.EXPERIMENT_DIR, opt.EXPERIMENT.CODENAME)\n\nopt.LOG = edict()\nopt.LOG.LOG_FILE = osp.join(opt.EXPERIMENT.DIR, f'log_{opt.EXPERIMENT.TASK}.txt')\n\nopt.TEST = edict()\nopt.TEST.CHECKPOINT = sys.argv[2]\nopt.TEST.WORKERS = 12\nopt.TEST.BATCH_SIZE = 256\nopt.TEST.OUTPUT = sys.argv[1]\n\n\nif not osp.exists(opt.EXPERIMENT.DIR):\n os.makedirs(opt.EXPERIMENT.DIR)\n\nlogger = create_logger(opt.LOG.LOG_FILE)\nlogger.info('\\n\\nOptions:')\nlogger.info(pprint.pformat(opt))\n\n\nDATA_INFO = cfg.DATASET\n\n# Data-loader of testing set\ntransform_test = transforms.Compose([\n# transforms.Resize((opt.MODEL.IMAGE_SIZE)),\n# transforms.CenterCrop(opt.MODEL.INPUT_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ],\n std = [ 0.229, 0.224, 0.225 ]),\n])\n\ntest_dataset = get_data_loader(sys.argv[5], sys.argv[3], transform_test,\n DATA_INFO.NUM_CLASSES, \"test\",\n opt.MODEL.INPUT_SIZE)\nlogger.info(f'{len(test_dataset)} images are found for test')\n\ntest_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=opt.TEST.BATCH_SIZE, shuffle=False, num_workers=opt.TEST.WORKERS)\n\nlast_checkpoint = torch.load(opt.TEST.CHECKPOINT)\nopt.MODEL.ARCH = last_checkpoint['arch']\n\n\n# create model\nlogger.info(f'using pre-trained model {opt.MODEL.ARCH}')\nmodel = pretrainedmodels.__dict__[opt.MODEL.ARCH](num_classes=1000, pretrained='imagenet')\nassert(opt.MODEL.INPUT_SIZE % 32 == 0)\n\nif opt.MODEL.ARCH.startswith('resnet'):\n model.avgpool = nn.AvgPool2d(opt.MODEL.INPUT_SIZE // 32, stride=1)\n model.last_linear = nn.Linear(model.last_linear.in_features, DATA_INFO.NUM_CLASSES)\nelif opt.MODEL.ARCH.startswith('dpn'):\n model.last_linear = nn.Conv2d(model.last_linear.in_channels, DATA_INFO.NUM_CLASSES, kernel_size=1, bias=True)\n model.test_time_pool = False\nelse:\n model.avgpool = nn.AvgPool2d(opt.MODEL.INPUT_SIZE // 32, stride=1)\n model.last_linear = nn.Linear(model.last_linear.in_features, DATA_INFO.NUM_CLASSES)\n\nmodel = torch.nn.DataParallel(model).cuda()\n\n\nmodel.module.load_state_dict(last_checkpoint['state_dict'])\nlogger.info(f\"Checkpoint '{opt.TEST.CHECKPOINT}' was loaded.\")\n\nlast_epoch = last_checkpoint['epoch']\nsoftmax = torch.nn.Softmax(dim=1).cuda()\n\n# pred_indices = []\n# pred_scores = []\npred_confs = []\n\n# model.eval()\n\nwith torch.no_grad():\n for input in tqdm(test_loader):\n output = model(input)\n top_scores, top_indices = torch.topk(output, k=20)\n top_indices = top_indices.data.cpu().numpy()\n top_scores = top_scores.data.cpu().numpy()\n\n confs = softmax(output)\n# top_confs, _ = torch.topk(confs, k=20)\n# top_confs = top_confs.data.cpu().numpy()\n#\n# pred_indices.append(top_indices)\n# pred_scores.append(top_scores)\n pred_confs.append(confs)\n\n# pred_indices = np.concatenate(pred_indices)\n# pred_scores = np.concatenate(pred_scores)\npred_confs = np.concatenate(pred_confs)\n\n# np.savez(opt.TEST.OUTPUT, pred_indices=pred_indices, pred_scores=pred_scores, pred_confs=pred_confs, checkpoint=opt.TEST.CHECKPOINT)\nnp.save(opt.TEST.OUTPUT, pred_confs)\nlogger.info(f\"Results were saved to 
{opt.TEST.OUTPUT}\")\n","sub_path":"predict_pretrainedmodels.py","file_name":"predict_pretrainedmodels.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"103860141","text":"import os\nimport unittest\nfrom datetime import datetime\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\n\nfrom app_modules.config import Config\nfrom app_modules.model_orm import *\n\ntest_data_dir = os.path.abspath(os.path.dirname(\"data/\"))\nexpert_file = os.path.join(test_data_dir, \"expert_unit_test.csv\")\n\n\n\nclass TestCase(unittest.TestCase):\n\n\n def setUp(self):\n self.engine = create_engine('sqlite:///' + os.path.join(Config.BASE_DIR, 'test.db'))\n\n\n metadata.create_all(self.engine)\n # Base.metadata.create_all(self.engine)\n self.session = Session(self.engine)\n\n def delete(self, table):\n result = self.session.query(table).all()\n for i in result:\n self.session.delete(i)\n self.session.commit()\n\n def tearDown(self):\n # Base.metadata.drop_all(self.engine)\n metadata.drop_all(self.engine)\n # delete all fields\n self.delete(Field)\n # delete all experts\n self.delete(Expert)\n\n\n\n def test_add_field(self):\n # create fields\n self.session.bulk_save_objects([\n Field(\"Finance\"),\n Field(\"Agriculture\"),\n Field(\"Transport\"),\n Field(\"Media\")\n ])\n self.session.commit()\n\n\n\n\n # create experts\n e1 = Expert(\n name='Brown',\n first_name='Jason',\n date_of_birth=datetime.strptime('1982-01-16', \"%Y-%m-%d\").date(),\n email='jason@gmail.com',\n linkedin='linkedin.com/Jason_Brown',\n status_id=1\n )\n e2 = Expert(\n name='Baxter',\n first_name='Patty',\n date_of_birth=datetime.strptime('1982-01-16', \"%Y-%m-%d\").date(),\n email='patty@gmail.com',\n linkedin='linkedin.com/Patty_Baxter',\n status_id=1\n )\n self.session.bulk_save_objects([e1, e2])\n self.session.commit()\n\n # result = self.session.query(Expert).all()\n # for i in result:\n # print(i)\n\n # add filds to experts\n print(\"Before: \")\n for i in e1.fields:\n print(i)\n\n result = self.session.query(expert_field).all()\n for i in result:\n print(i)\n\n e1.add_field(self.session.query(Field).filter_by(field_name=\"Agriculture\").first())\n e1.add_field(self.session.query(Field).filter_by(field_name=\"Finance\").first())\n\n print(\"After: \")\n for i in e1.fields:\n print(i)\n result = self.session.query(expert_field).all()\n for i in result:\n print(i)\n\n\n\n\n\n\n\n\n\nif __name__=='__main__':\n unittest.main()","sub_path":"sandbox/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"57621322","text":"import argparse\nimport json\nimport sys\nimport torch\nfrom torch import optim\nfrom src.utils.torch_lr_finder.lr_finder import LRFinder\nfrom torch.utils.data import DataLoader\nfrom src.dataset.YT_Greenscreen import YT_Greenscreen\nfrom src.gridtrainer import GridTrainer\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\"\"\"\nStarts LR range test for multiple hyperparmaeters and plots them\n\"\"\"\n\n# -cfg src\\models\\LR_Tests\\bs_6\\Deep_resnet50_gruV4bs6num_iter100ID19/train_config.json\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-cfg\", \"--config\",\n help=\"Config location\", type=str)\nargs = parser.parse_args()\n\nwith open(args.config) as js:\n config = json.load(js)\n\nhistorys 
= []\nweight_decays = [0, 1e-4, 1e-6, 1e-8]\n# weight_decays = [2, 4, 6, 8,10]\n\n\nfor wd in weight_decays:\n sys.stderr.write(f\"\\nParam: {str(wd)}, hist: {historys}\\n\")\n trainer = GridTrainer(config, load_from_checkpoint=False, batch_size=config[\"batch_size\"])\n model = trainer.model\n criterion = trainer.criterion\n val_dataset = YT_Greenscreen(train=False, start_index=0,\n batch_size=config[\"batch_size\"]) #\n val_loader = DataLoader(val_dataset, val_dataset.batch_size, shuffle=False) # val_dataset.batch_size\n optimizer = optim.Adam(model.parameters(), lr=1e-7, weight_decay=wd)\n lr_finder = LRFinder(model, optimizer, criterion, device=device)\n try:\n lr_finder.range_test(trainer.loader, end_lr=10, num_iter=config[\"num_epochs\"]) # val_loader=val_loader\n historys.append(lr_finder.history)\n lr_finder.plot(skip_start=0, skip_end=0)\n except RuntimeError as e:\n if 'out of memory' in str(e):\n print(f'| WARNING: ran out of memory for batch size {wd}')\n torch.cuda.empty_cache()\n weight_decays.remove(wd)\n lr_finder.reset()\n\n# creates a plot of the lr range test results\nfig = plt.figure(figsize=(15, 10))\nax = fig.add_subplot(111)\n\nfor wd, hist in zip(weight_decays, historys):\n lrs = hist[\"lr\"]\n losses = hist[\"loss\"]\n ax.plot(lrs, losses, label=str(wd))\n ax.set_xscale(\"log\")\n# x_ticks = [i*10 ** - exponent for exponent in range(9, 1, -1) for i in range(1,10,1)]\n# plt.xticks(x_ticks)\njs = {\"wds\": weight_decays, \"hists\": historys}\n\nwith open(str(config[\"save_files_path\"]) + \"/results.json\", \"w\") as out:\n json.dump(js, out)\n\nplt.xlabel(\"Learning Rate\")\nplt.ylabel(\"Loss\")\nax.legend(title=\"Weight Decay\")\nplt.title(\"Learning Rate vs Loss for different weight decays\")\n\nfig.savefig(str(config[\"save_files_path\"] + \"/lr_analysis.png\"))\n","sub_path":"src/lr_finder.py","file_name":"lr_finder.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"75463161","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport ast\nimport DataUtils\n\n\nproducTypes = {\n 'RENOVATION LOAN' : {\n 'url':'https://www.cofidis.be/fr/pret-travaux.php',\n 'productID': 'COFI0003'\n },\n 'PERSONAL LOAN': {\n 'url': 'https://www.cofidis.be/fr/pret-personnel.php',\n 'productID': 'COFI0001'\n },\n '2ND_HAND_CAR LOAN': {\n 'url': 'https://www.cofidis.be/fr/pret-voiture-occasion.php',\n 'productID': 'COFI0002'\n },\n\n}\n\n\ndef requestDataforUrl(product):\n url = producTypes[product]['url']\n headers = {\n 'User-Agent': \"PostmanRuntime/7.16.3\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"fcd88eea-63ae-416b-9d9b-ba148ef78fe9,2d67f32e-69ff-433e-b6b9-426d6fd94700\",\n 'Host': \"www.cofidis.be\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Cookie': \"nid=E0283035FA0FFA7582F10A9AFF6B2C5B62B7B1A0\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n response = requests.request(\"GET\", url, headers=headers)\n soup = BeautifulSoup(response.text, 'html.parser')\n all_scripts = soup.find_all('script')\n for script in all_scripts:\n if 'allsimuJsonA' in script.text:\n try:\n return ast.literal_eval(re.search('{.*}', script.text).group(0))\n except:\n return {}\n\ndef computeRate(rateString):\n try:\n rate_num = rateString.replace(',', '.')\n return float(rate_num[:5])\n except:\n rate_num = rateString.replace(',', '.')\n return float(rate_num[:4])\n\ndef data_for_product(product):\n data = []\n loanData = 
requestDataforUrl(product)\n for loanAmnt in loanData['simu']['mnt']:\n for loanJson in loanAmnt['am']['am']:\n data.append(dict(type=product, amount=loanAmnt['m'], rate=computeRate(loanJson['a']), duration=loanJson['d'],\n productID=producTypes[product]['productID']))\n return data\n\ndef bankData():\n bank_data = []\n for pdt in producTypes:\n bank_data.append(data_for_product(pdt))\n return bank_data\n\ndef cofidisLoanScraper():\n print('COFIDIS SCRAPE PROCESSING ...')\n tab_col = ['PROVIDER ', 'PRODUCTID', 'LOAN TYPE', 'MIN AMT', 'MAX AMT', 'TERM', 'RATE']\n return DataUtils.data_processing_last(bankData(), 'COFIDIS', 'COFIDIS SCRAPE', 'cofidis_rate', tab_col)\n\n\n\n\n# cofidisLoanScraper()\n# DataUtils.scrape_and_notify(cofidisLoanScraper(), \"cofidis\", DataUtils.test_mail)\n\n\n\n","sub_path":"Cofidis_rates.py","file_name":"Cofidis_rates.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378679044","text":"from keras.datasets import mnist\nfrom keras.models import Model\nfrom keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense\nfrom keras.utils import np_utils\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n# Best run: 0.9901\n\n# Load data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n#Format images\nx_train = x_train.reshape(60000, 28, 28, 1)\nx_test = x_test.reshape(10000, 28, 28, 1)\nx_train = x_train.astype('float32') #use floats so we can scale below\nx_test = x_test.astype('float32')\n#Scale inputs to 0-1 rather than 0-255\nx_train /= 255\nx_test /= 255\n\n# Make labels have one-hot encoding\ny_train = np_utils.to_categorical(y_train, 10)\ny_test = np_utils.to_categorical(y_test, 10)\n\n# Initialise layers\ninputlayer = Input(shape = (28, 28, 1))\nhidden = Conv2D(32, 3, strides=(1, 1), activation='tanh')(inputlayer)\nhidden = MaxPooling2D()(hidden)\nhidden = Conv2D(32, 3, strides=(1, 1), activation='tanh')(hidden)\nhidden = MaxPooling2D()(hidden)\noutputlayer = Flatten()(hidden)\noutputlayer = Dense(100, activation='sigmoid')(outputlayer)\noutputlayer = Dense(100, activation='sigmoid')(outputlayer)\noutputlayer = Dense(10, activation='sigmoid')(outputlayer)\n\n# Make an instance of the model with the above layers, choose its optimizer/loss etc., and train it\nmodel = Model(inputlayer, outputlayer)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.fit(x_train, y_train, batch_size=32, epochs=5)\n\n# Evaluate trained network\nscore = model.evaluate(x_test, y_test, batch_size=32)\nprint('Test score:', score[0])\nprint('Test accuracy:', score[1])\n\n# Make lists of which test images were correct and incorrect\npredicted_classes = model.predict(x_test) # lists of probabilities of each class\npredicted_classes = [np.argmax(example) for example in predicted_classes] # pick out the associated number\ny_test = np.argmax(y_test, axis=1) #undo one-hot encoding on y_test, so we can compare\ncorrect_indices = np.nonzero(predicted_classes == y_test)[0]\nincorrect_indices = np.nonzero(predicted_classes != y_test)[0]\n\n# See some incorrectly-classes images\nfig, axes = plt.subplots(nrows=3, ncols=3)\nfig.tight_layout()\nrandom.shuffle(incorrect_indices)\nfor i, incorrect in enumerate(incorrect_indices[:9]):\n plt.subplot(3,3,i+1)\n plt.imshow(x_test[incorrect].reshape(28,28), cmap='gray', interpolation='none')\n plt.title('Predicted %s, Actual %s' %(predicted_classes[incorrect], 
y_test[incorrect]))\nplt.title('Some wrongly-classified images')\nplt.show()","sub_path":"MNIST convnet.py","file_name":"MNIST convnet.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"250742877","text":"from scaling_by_ratio import Scaling\n\ncalc = Scaling() # instatiate the object\ncalc.set_target_area() # set the area as a constant for the loop\n\nnot_done = True\n\n# run calculations till your eyes bleed\nwhile(not_done):\n calc.set_width()\n calc.set_height()\n calc.calc_ratio()\n calc.print_final()\n finished = input(\"type 'done' if you are finished. \\\n \\nOtherwise, press enter: \")\n if finished == 'done':\n not_done = False\n \n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"414852607","text":"import os\nfrom .config import config\nfrom .utils import prepare_directory\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\nplt.rcParams.update(**config.rc)\nsns.set_style(rc=config.rc)\n\n\ndef _style_plot(axes):\n axes.legend(loc=0)\n axes.grid(\"on\")\n\n return axes\n\n\ndef _generate_plot(df, column, subject):\n _, axes = plt.subplots(1, 1, figsize=(20, 8))\n\n g = sns.lineplot(x=df[\"timestamp\"], y=df[column], ax=axes)\n\n axes.set_xlabel(\"Timestamp\")\n axes.set_ylabel(column.title())\n axes.set_title(subject + f\": {column}\")\n\n axes = _style_plot(axes)\n\n return g\n\n\ndef _generate_histogram(df, column, subject):\n \n _, axes = plt.subplots(1, 1, figsize=(20, 8))\n\n g = sns.distplot(df[column], ax=axes)\n\n axes.set_xlabel(column.title())\n axes.set_title(subject + f\": {column} distribution.\")\n\n axes = _style_plot(axes)\n\n return g\n\n\ndef _plot_file_path(subject, metric, path):\n\n filename = f\"{subject}_{metric}.png\"\n return os.path.join(path, filename)\n\n\ndef _plot_metrics(df, column, subject, show_plots):\n\n\n path = prepare_directory('plots')\n file_path = _plot_file_path(subject, column, path)\n\n if not os.path.exists(file_path):\n g = _generate_plot(df, column, subject)\n g.figure.savefig(file_path)\n else:\n print(f\"{file_path} exists.\")\n\n if show_plots:\n plt.show()\n plt.close()\n\n\ndef generate_plots(df, show_plots=False):\n for subject in df[\"subject\"].unique():\n df_subject = df[df[\"subject\"] == subject]\n\n for column in config.columns_to_plot:\n _plot_metrics(df_subject, column, subject, show_plots)\n","sub_path":"glucose_forecast/plot_measurements.py","file_name":"plot_measurements.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"538886287","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\n\nfrom tests.common import TestCase\nfrom tests.common import EXAMPLE_DATA_PATH\nfrom xml.etree import ElementTree\n\nfrom clocwalk.plugins.maven import start\n\n\nclass POMTestCase(TestCase):\n\n def setUp(self):\n self.pom_dir = [\n 'a/a1/pom.xml',\n 'b/pom.xml',\n 'pom.xml',\n ]\n\n def test_start_3_dir(self):\n result = start(code_dir=os.path.join(EXAMPLE_DATA_PATH, 'plugins', 'pom'), skipNewVerCheck=True)\n for item in result:\n self.assertIn(item['origin'], self.pom_dir)\n\n def test_start_new_version(self):\n result = start(code_dir=os.path.join(EXAMPLE_DATA_PATH, 'plugins', 'pom', 'a'))\n for item in result:\n self.assertIsNotNone(item['new_version'])\n\n def 
test_start_dir(self):\n result = start(code_dir='', skipNewVerCheck=True, tag_filter=['org.mybatis.generator'])\n for item in result:\n self.assertIn(item['origin'], self.pom_dir)\n","sub_path":"tests/unit/plugins/test_maven.py","file_name":"test_maven.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"389046312","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 29 14:01:30 2019\n\n@author: ispluser\n\"\"\"\n\nfrom osgeo import gdal\nfrom osgeo import ogr\nimport numpy as np\nimport math\nfrom scipy.stats import mode, zscore\n\ndef calculateAcreage(crop,filepath):\n print(\"Analysing\",crop,\"data!\")\n dataset = gdal.Open(filepath)\n band = dataset.GetRasterBand(1)\n\n rast_array = np.array(band.ReadAsArray())\n tcount = 0\n count = 0\n\n withoutNoData = []\n\n for row in rast_array:\n for element in row:\n tcount = tcount + 1\n if math.isnan(element) == False and element != 0.0:\n count = count+1\n withoutNoData.append(element)\n\n '''\n minval = band.ComputeStatistics(0)[0]\n maxval = band.ComputeStatistics(0)[1]\n meanval = band.ComputeStatistics(0)[2]\n sdval = band.ComputeStatistics(0)[3]\n '''\n\n minval = min(withoutNoData)\n maxval = max(withoutNoData)\n meanval = np.mean(withoutNoData)\n medianval = np.median(withoutNoData)\n modeval = float(mode(withoutNoData)[0])\n modefreq = int(mode(withoutNoData)[1])\n sdval = np.std(withoutNoData)\n varianceval = np.var(withoutNoData)\n rangeval = maxval - minval\n coefvariation = sdval * 100 / meanval\n# zs = zscore(withoutNoData)\n# threshold = 3\n# outliers = np.where((zs) > threshold)\n \n print(\"Min Value:\", minval)\n print(\"Max Value:\", maxval)\n print(\"Mean :\", meanval)\n print(\"Median :\", medianval)\n print(\"Mode :\", modeval)\n print(\"Mode frequency :\", modefreq)\n print(\"Variance :\", varianceval)\n print(\"Standard Deviation :\", sdval)\n print(\"Range :\", rangeval)\n print(\"Coefficient of Variation :\", coefvariation)\n# print(\"Zscore:\", zs)\n# print(\"unique values:\", np.unique(zs,return_counts = True))\n# print(\"outliers:\", outliers)\n area = count * 9/1000000\n\n #tarea = tcount * 9/1000000\n #print(\"Total pixels:\", tcount)\n #print(\"Total\", crop ,\"pixels:\",count)\n #print(\"Total area:\", tarea, \"sqkm\")\n\n print(\"Total\", crop ,\"area:\", area, \"sqkm\")\n print(\"------------------------------------\")\n return(varianceval,area)\n\nshpfile1 = ogr.Open(\"/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/Ahm_taluk_shp/Dholka.shp\")\nshape1 = shpfile1.GetLayer(0)\nfeature1 = shape1.GetFeature(0)\ntalukArea1 = feature1.geometry().GetArea()/1000000\n#\nshpfile2 = ogr.Open(\"/home/ispluser/Dimple/Wheat_Paddy_ahm/Tutorial1/Ahm_taluk_shp/Saanand.shp\")\nshape2 = shpfile2.GetLayer(0)\nfeature2 = shape2.GetFeature(0)\ntalukArea2 = feature2.geometry().GetArea()/1000000\n\n#paddyacreage = calculateAcreage(\"Paddy\", \"/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/NDVI_Mask_tiff/dholka_wheat_tiff.tif\")\nvariance1, wheatacreage_1 = calculateAcreage(\"Wheat\", \"/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/NDVI_Mask_tiff/dholka_wheat_tiff.tif\")\nvariance2, wheatacreage_2 = calculateAcreage(\"Wheat\", \"/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/NDVI_Mask_tiff/sanand_wheat_tiff.tif\")\n\ntaluka_variances =[variance1,variance2] \nprint(\"Area of taluka1:\" , talukArea1, \"sqkm\")\n#print(\"Area of taluka2:\" , talukArea2, 
\"sqkm\")\nprint(\"------------------------------------\")\n\n#print(\"Percentage area of Paddy:\", 100 * paddyacreage / villageArea, \"%\")\nprint(\"Percentage area of Wheat:\", 100 * wheatacreage_1 / talukArea1, \"%\")\nprint(\"Percentage area of Wheat:\", 100 * wheatacreage_2 / talukArea2, \"%\")\n\nprint(\"The taluka with maximum variance is {}\".format(np.argmax(taluka_variances)))\n\n\n# compare variability across taluka :\n\ndef compare_taluka(shapefile_path_list, imagefile_path_list):\n acreage = []\n variability = []\n for i in range(len(shapefile_path_list)):\n shpfile = ogr.Open(shapefile_path_list[i])\n shape = shpfile.GetLayer(0)\n feature = shape.GetFeature(0)\n talukArea = feature.geometry().GetArea()/1000000\n variance, cropAcreage = calculateAcreage(\"Wheat\", imagefile_path_list[i])\n acreage.append(cropAcreage)\n variability.append(variance)\n max_val , min_val = np.argmax(variability), np.argmin(variability)\n min_region_name = imagefile_path_list[min_val].split('/')[-1].split('.')[0].split('_')[0]\n max_region_name = imagefile_path_list[max_val].split('/')[-1].split('.')[0].split('_')[0]\n return min_region_name , max_region_name\n \nshape_list = ['/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/Ahm_taluk_shp/Dholka.shp','/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/Ahm_taluk_shp/Saanand.shp']\nimage_list = ['/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/NDVI_Mask_tiff/dholka_wheat_tiff.tif','/home/ispluser/jay/Wheat_Paddy_ahm/Tutorial1/NDVI_Mask_tiff/sanand_wheat_tiff.tif']\n\n\nminimum_region , maximum_region = compare_taluka(shape_list, image_list)\nprint(\"The Taluka with minimum variability is {}\".format(minimum_region))\nprint(\"The Taluka with maximum variability is {}\".format(maximum_region))\n\n\n########\ndef data(imagefile_path_list):\n conditioned_data = []\n for i in range(len(imagefile_path_list)):\n dataset = gdal.Open(imagefile_path_list[i])\n band = dataset.GetRasterBand(1)\n \n rast_array = np.array(band.ReadAsArray())\n tcount = 0\n count = 0\n \n withoutNoData = []\n \n for row in rast_array:\n for element in row:\n tcount = tcount + 1\n if math.isnan(element) == False and element != 0.0:\n count = count+1\n withoutNoData.append(element)\n conditioned_data.append(withoutNoData) \n \n return conditioned_data\nconditioned_data = data(image_list)\n\ndef grouping(data_list):\n classified_list = []\n conditioned = []\n for i in range(len(data_list)):\n data = data_list[i]\n classified = {}\n counts = {'Poor':0,\"Medium\": 0, \"Good\": 0, \"Excellent\": 0}\n for i in range(len(data)):\n if 0 < data[i] <= 0.25:\n classified[data[i]] = 'Poor';\n counts['Poor'] += 1\n elif 0.26 < data[i] <= 0.50:\n classified[data[i]] = 'Medium';\n counts['Medium'] += 1\n elif 0.51 < data[i] <= 0.75: \n classified[data[i]] = 'Good';\n counts['Good'] += 1\n else:\n classified[data[i]] = 'Excellent'\n counts['Excellent'] += 1\n classified_list.append(classified)\n conditioned.append(counts)\n return classified_list,conditioned\n\ncategorized, count_dict = grouping(conditioned_data)\n\n#%%\n#histogram of conditional data:\nimport matplotlib.pyplot as plt\n\ncondition = count_dict[0].keys()\n\nplt.plot(condition, count_dict[0].values(), color = 'b', label = 'Taluka1')#, width = 0.25)\n\nplt.plot(condition, count_dict[1].values(), color = 'r', label = 'Taluka2')\nplt.legend()#, width = 
0.35)\nplt.show()\n","sub_path":"compare_taluk.py","file_name":"compare_taluk.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484332937","text":"def say_hi(name):\n raise Exception(\"IAMERROR\")\n #a = 1/0\n return f\"Hi! {name}\"\n\n\ndef handle_hi(msg):\n print(f\"msg is {msg}\")\n name = say_hi(\"bbroy\")\n return f\"{name} : {msg}\"\n\n #name = \"default\"\n #return f\"{name} : {msg}\"\n\n\ndef handle_bye():\n try:\n a = handle_hi(\"INcoming\")\n b = handle_hi(\"Outgoing.\")\n print(f\"{a},{b}\")\n except Exception as e:\n import traceback\n traceback.print_exc()\n print(\"bla blu bup.\")\n\n\nhandle_bye()\n","sub_path":"pyExceptions/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"90068192","text":"# Source : https://oj.leetcode.com/problems/reverse-integer/\n# Author : imkh\n# Date : 2015-01-20\n\n# Reverse digits of an integer.\n# Example1: x = 123, return 321\n# Example2: x = -123, return -321\n\nclass Solution:\n # @return an integer\n def reverse(self, x):\n result = 0\n sign = 1 if x > 0 else -1\n x = abs(x)\n while x > 0:\n result = result * 10 + x % 10\n x //= 10 # use floor division; '/' would turn x into a float on Python 3\n\n if result > 2147483647:\n return 0\n else:\n return sign * result\n","sub_path":"reverse-integer/reverse-integer-1.py","file_name":"reverse-integer-1.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"289901026","text":"\"\"\"Batching and dispatching jobs to work on.\"\"\"\n\nfrom contextlib import contextmanager\n\nfrom h_api.exceptions import CommandSequenceError\n\n\nclass CommandBatcher:\n \"\"\"Collects together similar commands, passing them to a callback.\n\n This object will group similar commands together and decide when they\n should be executed. The number of commands should always be a number we\n are comfortable dealing with in memory at once.\n \"\"\"\n\n def __init__(self, on_flush, batch_size=100):\n \"\"\"Create a new command batcher.\n\n :param on_flush: Callback to call when it's time to execute\n :param batch_size: The max size of a batch\n \"\"\"\n\n self.on_flush = on_flush\n\n self.batch = None\n self.batch_size = batch_size\n self.finished_tasks = set()\n self.current_task = None\n\n def flush(self):\n \"\"\"Flush the current batch of commands to the callback.\"\"\"\n\n if not self.batch:\n self.batch = None\n return\n\n command_type, data_type = self.current_task\n self.on_flush(command_type, data_type, self.batch)\n\n self.batch = None\n\n @contextmanager\n def add(self, command):\n \"\"\"Add a command to the batch using a context manager.\n\n Making this a context manager allows us to do the prep-work we need\n to before and after a command is added.\n\n In order to ensure that the final batch of commands is correctly\n processed the caller must call `flush()` after all `add()` calls are\n complete.\n \"\"\"\n\n self._check_sequence(command)\n\n # The above command might result in flushing previous commands. 
This\n # may be necessary to process this command as the previous batch could\n # return a concrete id for a reference we rely on now.\n yield\n\n if self.batch is None:\n self.batch = []\n\n self.batch.append(command)\n\n if self.batch and len(self.batch) >= self.batch_size:\n self.flush()\n\n def _check_sequence(self, command):\n \"\"\"Check to see if the command sequence is wrong / requires a flush.\"\"\"\n\n command_key = (command.type, command.body.type)\n\n if self.current_task is None:\n self.current_task = command_key\n return\n\n if command_key == self.current_task:\n return\n\n if command_key in self.finished_tasks:\n raise CommandSequenceError(\"Cannot return to old task\")\n\n # Looks like we are switching to a new task\n self.flush()\n\n self.finished_tasks.add(self.current_task)\n self.current_task = command_key\n","sub_path":"src/h_api/bulk_api/command_batcher.py","file_name":"command_batcher.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"125237150","text":"import argparse\nfrom typing import Sequence\n\nimport cv2\nimport matplotlib.image as mpimg\nimport numpy as np\nimport os\n\nfrom image_processing import grayscale, region_of_interest, gaussian_blur, canny, hough_lines, weighted_img, \\\n make_lines_image\n\nfrom line_math import Line, average_of_lines, slope, extrapolate\n\nRHO = 2\nTHETA = np.pi / 180\nHOUGH_LINE_THRESHOLD = 50\nMIN_LINE_LEN = 120\nMAX_LINE_GAP = 150\nTOP_VERTEX_HEIGHT_PERCENT = (6 / 11)\nTOP_VERTEX_WIDTH_PERCENT = (1 / 2)\n\nGAUSSIAN_BLUR_KERNEL_SIZE = 3\n\nLOW_CANNY_THRESHOLD = 200\nHIGH_CANNY_THRESHOLD = 300\n\nMIN_ACCEPTABLE_Y_POSITION = 400\nMIN_SLOPE = .4\n\nLANE_TOP_Y_POSITION = 320\n\nPIPELINE_EXAMPLES = \"./pipeline_examples/\"\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"annotate lane lines on an image of a road.\")\n parser.add_argument(\"src_path\", help=\"The path of the source image you would like annotated.\")\n parser.add_argument(\"dest_path\", help=\"where you'd like us to leave your nice annotated image.\")\n args = parser.parse_args()\n annotate_lanes_file(args.src_path, args.dest_path)\n print(\"Annotated file:\", args.src_path, \"and left it at:\", args.dest_path, \"Drive safe!\")\n\n\ndef annotate_lanes_batch(source_dir: str, dest_dir: str) -> None:\n \"\"\" Annotate a directory filled with images \"\"\"\n image_path_endings = os.listdir(source_dir)\n source_paths = (os.path.join(source_dir, ending) for ending in image_path_endings)\n dest_paths = (os.path.join(dest_dir, ending) for ending in image_path_endings)\n\n for (source, dest) in zip(source_paths, dest_paths):\n annotate_lanes_file(source, dest)\n\n\ndef annotate_lanes_file(source_file_path: str, dest_file_path: str) -> None:\n image = mpimg.imread(source_file_path)\n image_copy = np.copy(image)\n with_lane_highlights = annotate_lanes(image_copy)\n # cv2.imwrite(dest_file_path, with_lane_highlights)\n _write_color_image(dest_file_path, with_lane_highlights)\n\n\ndef annotate_lanes(image: np.ndarray) -> np.ndarray:\n \"\"\" Returns a copy of the input image with lanes annotated. 
\"\"\"\n first_img = np.copy(image)\n height, width, _ = first_img.shape\n\n lines = _find_lines(first_img)\n lanes = _reduce_to_lanes(lines)\n\n # draw from the bottom of the image to near the horizon\n lanes = [extrapolate(lane, height, LANE_TOP_Y_POSITION) for lane in lanes]\n\n lines_img = make_lines_image(lanes, height, width)\n # first_img = region_of_interest(first_img, [slightly_smaller_vertices])\n return weighted_img(lines_img, first_img, .9)\n\n\ndef find_lanes(image: np.ndarray) -> Sequence[Line]:\n \"\"\" Returns a list of Line objects corresponding to lanes. \"\"\"\n first_img = np.copy(image)\n height, width, _ = first_img.shape\n\n lines = _find_lines(first_img)\n lanes = _reduce_to_lanes(lines)\n\n # draw from the bottom of the image to near the horizon\n return [extrapolate(lane, height, LANE_TOP_Y_POSITION) for lane in lanes]\n\n\ndef _find_lines(image: np.ndarray) -> Sequence[Line]:\n height, width, _ = image.shape\n gray = grayscale(image)\n vertices = _region_of_interest_vertices(height, width)\n region = region_of_interest(gray, [vertices]) # HMM, how do we get rid of the lines from our region selection\n blurred = gaussian_blur(region, GAUSSIAN_BLUR_KERNEL_SIZE) # maybe we can filter all non lanes by raising blur\n canny_img = canny(blurred, LOW_CANNY_THRESHOLD, HIGH_CANNY_THRESHOLD)\n\n slightly_smaller_vertices = _vertices_just_inside(vertices)\n cropped_canny = region_of_interest(canny_img, [slightly_smaller_vertices])\n\n return hough_lines(cropped_canny, RHO, THETA, HOUGH_LINE_THRESHOLD, MIN_LINE_LEN, MAX_LINE_GAP)\n\n\ndef _reduce_to_lanes(lines: Sequence[Line]) -> Sequence[Line]:\n \"\"\" Heuristically reduce a list of lines to what we believe are lanes. \"\"\"\n\n # filter lines that are too high to be lane lines\n lines = [line for line in lines if line.y1 > MIN_ACCEPTABLE_Y_POSITION or line.y2 > MIN_ACCEPTABLE_Y_POSITION]\n\n # Let's get rid of horizontal lines.\n lines = [line for line in lines if np.abs(slope(line)) > MIN_SLOPE]\n\n left_lanes = [line for line in lines if slope(line) > 0]\n right_lanes = [line for line in lines if slope(line) < 0]\n left_avg = average_of_lines(left_lanes)\n right_avg = average_of_lines(right_lanes)\n return left_avg + right_avg\n\n\ndef _region_of_interest_vertices(height: int, width: int) -> np.array:\n \"\"\" Create a region of interest by connecting a specified point to the two bottom corners of the img \"\"\"\n pinch = 10\n top_vertex_y = int(TOP_VERTEX_HEIGHT_PERCENT * height)\n top_vertex_x = int(TOP_VERTEX_WIDTH_PERCENT * width)\n return np.array([(pinch, height), (top_vertex_x, top_vertex_y), (width - pinch, height)])\n\n\ndef _vertices_just_inside(region: np.array) -> np.array:\n \"\"\" Returns a region just inside a given region. Regions are represented by a 3 element np.array of tuples. 
\"\"\"\n delta_pixels = 70\n bottom_left, top, bottom_right = region\n inner_bottom_left = (bottom_left[0] + delta_pixels, bottom_left[1] - int(delta_pixels / 10))\n inner_top = (top[0], top[1] + int(delta_pixels / 10))\n inner_bottom_right = (bottom_right[0] - delta_pixels, bottom_right[1] - int(delta_pixels / 10))\n return np.array([inner_bottom_left, inner_top, inner_bottom_right])\n\ndef _write_color_image(path, img):\n img_out = cv2.cvtColor(img, cv2.COLOR_BGR2RGB);\n cv2.imwrite(path, img_out)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"detect_lanes.py","file_name":"detect_lanes.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"638221340","text":"#\r\n# Name:\r\n# File: grid50.py\r\n# Date: Spring 2016\r\n#\r\n# Desc: \r\n\r\nfrom graphics import *\r\n\r\nclass Grid50:\r\n\r\n def __init__(self, win):\r\n \r\n # Constructor for 50 by 50 cell grid\r\n # located in left side of 640 by 480 window \r\n \r\n self.win = win # should be 640 x 480 pixel window\r\n self.x_lo = 39 # LL grid coordinates \r\n self.y_lo = 39\r\n self.size = 8 # size of cell in pixels\r\n self.max = 50 # mumber of rows and columns\r\n self.fColor = \"red\" # color of \"on\" cell\r\n self.bColor = \"white\" # color of \"off\" cell\r\n \r\n # 50 by 50 grid array of integers\r\n \r\n self.grid = []\r\n for i in range(self.max):\r\n self.grid = self.grid + [[0] * self.max] \r\n \r\n def clearGrid(self):\r\n \r\n # clear cell matrix and grid array\r\n \r\n for y in range(self.max):\r\n for x in range(self.max):\r\n if self.grid[y][x] != 0:\r\n self.grid[y][x] = 0\r\n self.__drawOne(x+1,y+1) # use 1 based indexing\r\n \r\n def __setGrid(self, x, y):\r\n \r\n # sets grid(x,y) to 1\r\n # uses 1 based indexing\r\n \r\n self.grid[y-1][x-1] = 1\r\n\r\n def __unsetGrid(self, x, y):\r\n \r\n # sets grid(x,y) to 0\r\n # uses 1 based indexing\r\n \r\n self.grid[y-1][x-1] = 0\r\n\r\n def __drawOne(self, x, y):\r\n \r\n # draws cell at grid(x,y)\r\n # uses 1 based indexing\r\n \r\n p1 = Point(self.x_lo + (x-1)*self.size,\r\n self.y_lo + (y-1)*self.size)\r\n p2 = Point(self.x_lo + x*self.size, \r\n self.y_lo + y*self.size)\r\n square = Rectangle(p1, p2)\r\n square.setOutline(\"black\")\r\n if self.grid[y-1][x-1] == 1:\r\n square.setFill(self.fColor)\r\n else:\r\n square.setFill(self.bColor)\r\n square.draw(self.win)\r\n\r\n def drawGrid(self):\r\n \r\n # draws all cells\r\n \r\n for y in range(self.max):\r\n for x in range(self.max):\r\n self.__drawOne(x+1,y+1)\r\n \r\n def drawFrame(self):\r\n \r\n # draws cell matrix frame\r\n \r\n mysize = self.size * self.max + 4\r\n xlo = self.x_lo - 2\r\n ylo = self.y_lo - 2\r\n xhi = xlo + mysize\r\n yhi = ylo + mysize\r\n Line(Point(xlo, ylo), Point(xhi, ylo)).draw(self.win)\r\n Line(Point(xhi, ylo), Point(xhi, yhi)).draw(self.win)\r\n Line(Point(xhi, yhi), Point(xlo, yhi)).draw(self.win)\r\n Line(Point(xlo, yhi), Point(xlo, ylo)).draw(self.win)\r\n length = self.max * self.size # should be 400\r\n for z in range(self.max+1):\r\n p1 = Point(self.x_lo, self.y_lo + z * self.size)\r\n p2 = Point(self.x_lo + length, self.y_lo + z * self.size)\r\n Line(p1, p2).draw(self.win)\r\n p1 = Point(self.x_lo + z * self.size, self.y_lo)\r\n p2 = Point(self.x_lo + z * self.size, self.y_lo + length)\r\n Line(p1, p2).draw(self.win)\r\n \r\n def setCell(self, x, y):\r\n\r\n # Sets cell(x,y) to \"on\"\r\n # uses 1 based indexing\r\n \r\n if (0 < x <= self.max) and (0 < y <= self.max):\r\n 
self.grid[y-1][x-1] = 1\r\n self.__drawOne(x,y)\r\n\r\n def unsetCell(self,x, y):\r\n\r\n # Unsets cell(x,y) \r\n # uses 1 based indexing\r\n \r\n if (0 < x <= self.max) and (0 < y <= self.max):\r\n self.grid[y-1][x-1] = 0\r\n self.__drawOne(x,y)\r\n\r\n def isCellOn(self, x, y):\r\n \r\n # returns True if grid(x,y) == 1\r\n # uses 1 based indexing\r\n \r\n return self.grid[y-1][x-1] == 1\r\n\r\n def toggleCell(self, p1):\r\n\r\n # turns \"on\" cell \"off\" and \"off\" cell \"on\"\r\n \r\n x = p1.getX()\r\n y = p1.getY()\r\n xhi = self.x_lo + self.size * self.max\r\n yhi = self.y_lo + self.size * self.max\r\n if (self.x_lo <= x < xhi) and (self.y_lo <= y < yhi):\r\n x = int((x - self.x_lo)/self.size) + 1\r\n y = int((y - self.y_lo)/self.size) + 1\r\n \r\n # uses 1 based indexing\r\n \r\n if self.grid[y-1][x-1] == 0:\r\n self.setCell(x,y)\r\n else:\r\n self.unsetCell(x, y)\r\n","sub_path":"lab11/Grid50.py","file_name":"Grid50.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"72802368","text":"from django.shortcuts import render, get_object_or_404\nfrom .forms import NewsUserForm\nfrom .models import NewsUsers\nfrom django.conf import settings \nfrom django.contrib import messages\n# For html mail sending\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom django.core.mail import send_mail\nfrom .models import UnsubscribingUser, NewsUsers\n# Unsubscribe\nfrom datetime import datetime\nfrom django.http import Http404\n\n# Create your views here.\ndef newsletter_subscribe(request):\n if request.method == 'POST':\n form = NewsUserForm(request.POST)\n if form.is_valid():\n instance = form.save(commit=False)\n if NewsUsers.objects.filter(email=instance.email).exists():\n messages.info(request, \"Your email is already added to my database.\")\n else:\n instance.save()\n messages.info(request, \"Your email has been submitted to my database. 
Check your inbox to make sure.\")\n                uu = UnsubscribingUser.objects.create(user=NewsUsers.objects.get(email=instance.email))\n html_message = render_to_string(\"newsletter/subscribers_message.html\", context={'user_name': instance.name, 'uu': uu})\n plain_message = strip_tags(html_message)\n send_mail('Subscription', plain_message, settings.DEFAULT_FROM_EMAIL, [instance.email], html_message=html_message, fail_silently=False)\n else:\n form = NewsUserForm()\n context = {'form': form}\n template = \"newsletter/subscribe.html\"\n return render(request, template, context)\n\ndef unsubscribe(request, unsub_id):\n try:\n uu = get_object_or_404(UnsubscribingUser, uuid=unsub_id)\n except Exception:\n context = {\"message\": 'You are no longer subscribed to my blog!'}\n return render(request, \"newsletter/unsubscribe.html\", context=context)\n if uu.expires >= datetime.now(uu.expires.tzinfo):\n NewsUsers.objects.filter(email=uu.user.email).delete()\n uu.delete()\n context = {'message': 'You are no longer subscribed to my blog!'}\n return render(request, \"newsletter/unsubscribe.html\", context=context)\n else:\n uu.delete()\n context = {'message': 'The link has expired, please try unsubscribing from my next mail.'}\n return render(request, \"newsletter/unsubscribe.html\", context=context)\n\n\n \n\n","sub_path":"newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"98998720","text":"import requests\n\nBASE = \"http://127.0.0.1:5000/\"\n\ndata = [{\"user_id\":\"123456\",\"reference_number\":\"1234\",\"destination\":\"KZN\",\"date\":\"2 Des\",\"location\":\"LP\",\"timeslot\":\"12:00\"},\n\t\t{\"user_id\":\"963258\",\"reference_number\":\"7894\",\"destination\":\"PTA\",\"date\":\"24 March\",\"location\":\"MPM\",\"timeslot\":\"12:00\"},\n\t\t{\"user_id\":\"789456\",\"reference_number\":\"9632\",\"destination\":\"JHB\",\"date\":\"5 April\",\"location\":\"NW\",\"timeslot\":\"12:00\"},\n\t\t{\"user_id\":\"112233\",\"reference_number\":\"1597\",\"destination\":\"CPT\",\"date\":\"30 Jan\",\"location\":\"FS\",\"timeslot\":\"12:00\"}]\n\n\nfor i in range(len(data)):\n\tresponse = requests.put(BASE + \"package/\" + str(i),data[i])\n\tprint(response.json())\n\n#response = requests.put(BASE + \"package/\" + str(5),{\"user_id\":\"112233\",\"reference_number\":\"1597\",\"destination\":\"CPT\",\"date\":\"30 Jan\",\"location\":\"FS\",\"timeslot\":\"12:00\"})\n#print(response)\n#response = requests.get(BASE + \"package/1\")\n#print(response.json())\n\n#response = requests.patch(BASE + \"package/0\",{\"reference_number\": \"7777\"})\n\n#response = requests.delete(BASE + \"package/123456\")\n#print(response)\n\nresponse = requests.get(BASE + \"package/1\")\nprint(response.json())\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"515034259","text":"\"\"\"\nlegacyhalos.qa\n==============\n\nCode to produce various QA (quality assurance) plots. 
\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nsns.set(style='ticks', font_scale=1.4, palette='Set2')\n\nPIXSCALE = 0.262\n\ndef display_multiband(data, band=('g', 'r', 'z'), refband='r', geometry=None,\n mgefit=None, ellipsefit=None, indx=None, magrange=10,\n inchperband=3, contours=False, png=None):\n \"\"\"Display the multi-band images and, optionally, the isophotal fits based on\n either MGE and/or Ellipse.\n\n \"\"\"\n from astropy.visualization import ZScaleInterval as Interval\n from astropy.visualization import AsinhStretch as Stretch\n from astropy.visualization import ImageNormalize\n \n nband = len(band)\n\n fig, ax = plt.subplots(1, 3, figsize=(inchperband*nband, nband))\n for filt, ax1 in zip(band, ax):\n\n img = data['{}_masked'.format(filt)]\n #img = data[filt]\n\n norm = ImageNormalize(img, interval=Interval(contrast=0.95),\n stretch=Stretch(a=0.95))\n\n im = ax1.imshow(img, origin='lower', norm=norm, cmap='viridis',\n interpolation='nearest')\n plt.text(0.1, 0.9, filt, transform=ax1.transAxes, #fontweight='bold',\n ha='center', va='center', color='k', fontsize=14)\n\n if mgefit:\n from mge.mge_print_contours import _multi_gauss, _gauss2d_mge\n\n sigmapsf = np.atleast_1d(0)\n normpsf = np.atleast_1d(1)\n _magrange = 10**(-0.4*np.arange(0, magrange, 1)[::-1]) # 0.5 mag/arcsec^2 steps\n #_magrange = 10**(-0.4*np.arange(0, magrange, 0.5)[::-1]) # 0.5 mag/arcsec^2 steps\n\n model = _multi_gauss(mgefit[filt].sol, img, sigmapsf, normpsf,\n mgefit['xpeak'], mgefit['ypeak'],\n mgefit['pa'])\n \n peak = data[filt][mgefit['xpeak'], mgefit['ypeak']]\n levels = peak * _magrange\n s = img.shape\n extent = [0, s[1], 0, s[0]]\n\n ax1.contour(model, levels, colors='k', linestyles='solid',\n extent=extent, alpha=0.75, lw=1)\n\n if geometry:\n from photutils import EllipticalAperture\n \n ellaper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma,\n geometry.sma*(1 - geometry.eps), geometry.pa)\n ellaper.plot(color='k', lw=1, ax=ax1)\n\n if ellipsefit:\n if len(ellipsefit[filt]) > 0:\n if indx is None:\n indx = np.ones(len(ellipsefit[filt]), dtype=bool)\n\n nfit = len(indx) # len(ellipsefit[filt])\n nplot = np.rint(0.5*nfit).astype('int')\n \n smas = np.linspace(0, ellipsefit[filt].sma[indx].max(), nplot)\n for sma in smas:\n efit = ellipsefit[filt].get_closest(sma)\n x, y, = efit.sampled_coordinates()\n ax1.plot(x, y, color='k', alpha=0.75)\n else:\n from photutils import EllipticalAperture\n geometry = ellipsefit['geometry']\n ellaper = EllipticalAperture((geometry.x0, geometry.y0), geometry.sma,\n geometry.sma*(1 - geometry.eps), geometry.pa)\n ellaper.plot(color='k', lw=1, ax=ax1)\n\n ax1.get_xaxis().set_visible(False)\n ax1.get_yaxis().set_visible(False)\n ax1.axis('off')\n ax1.set_adjustable('box-forced')\n ax1.autoscale(False)\n\n fig.subplots_adjust(wspace=0.02, top=0.98, bottom=0.02, left=0.02, right=0.98)\n if png:\n print('Writing {}'.format(png))\n fig.savefig(png, bbox_inches='tight', pad_inches=0)\n plt.close(fig)\n else:\n plt.show()\n\ndef display_ellipsefit(ellipsefit, band=('g', 'r', 'z'), refband='r', redshift=None,\n pixscale=0.262, xlog=False, png=None):\n \"\"\"Display the isophote fitting results.\"\"\"\n\n from matplotlib.ticker import FormatStrFormatter, ScalarFormatter\n\n if redshift:\n from astropy.cosmology import WMAP9 as cosmo\n smascale = pixscale / cosmo.arcsec_per_kpc_proper(redshift).value # [kpc/pixel]\n smaunit 
= 'kpc'\n else:\n smascale = 1.0\n smaunit = 'pixels'\n\n colors = iter(sns.color_palette())\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 9), sharex=True)\n for filt in np.atleast_1d(refband):\n\n good = (ellipsefit[filt].stop_code < 4)\n bad = ~good\n\n ax1.fill_between(ellipsefit[filt].sma[good] * smascale, \n ellipsefit[filt].eps[good]-ellipsefit[filt].ellip_err[good],\n ellipsefit[filt].eps[good]+ellipsefit[filt].ellip_err[good])#,\n #edgecolor='k', lw=2)\n if np.count_nonzero(bad) > 0:\n ax1.scatter(ellipsefit[filt].sma[bad] * smascale, ellipsefit[filt].eps[bad],\n marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)\n \n #ax1.errorbar(ellipsefit[filt].sma[good] * smascale,\n # ellipsefit[filt].eps[good],\n # ellipsefit[filt].ellip_err[good], fmt='o',\n # markersize=4)#, color=color[filt])\n #ax1.set_ylim(0, 0.5)\n ax1.xaxis.set_major_formatter(ScalarFormatter())\n\n ax2.fill_between(ellipsefit[filt].sma[good] * smascale, \n ellipsefit[filt].pa[good]-ellipsefit[filt].pa_err[good],\n ellipsefit[filt].pa[good]+ellipsefit[filt].pa_err[good])#,\n #edgecolor='k', lw=2)\n if np.count_nonzero(bad) > 0:\n ax2.scatter(ellipsefit[filt].sma[bad] * smascale, ellipsefit[filt].pa[bad],\n marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)\n #ax2.errorbar(ellipsefit[filt].sma[good] * smascale,\n # np.degrees(ellipsefit[filt].pa[good]),\n # np.degrees(ellipsefit[filt].pa_err[good]), fmt='o',\n # markersize=4)#, color=color[filt])\n ax2.yaxis.tick_right()\n ax2.yaxis.set_label_position('right')\n ax2.xaxis.set_major_formatter(ScalarFormatter())\n #ax2.set_ylim(0, 180)\n\n ax3.fill_between(ellipsefit[filt].sma[good] * smascale, \n ellipsefit[filt].x0[good]-ellipsefit[filt].x0_err[good],\n ellipsefit[filt].x0[good]+ellipsefit[filt].x0_err[good])#,\n #edgecolor='k', lw=2)\n if np.count_nonzero(bad) > 0:\n ax3.scatter(ellipsefit[filt].sma[bad] * smascale, ellipsefit[filt].x0[bad],\n marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)\n #ax3.errorbar(ellipsefit[filt].sma[good] * smascale, ellipsefit[filt].x0[good],\n # ellipsefit[filt].x0_err[good], fmt='o',\n # markersize=4)#, color=color[filt])\n ax3.xaxis.set_major_formatter(ScalarFormatter())\n ax3.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n \n ax4.fill_between(ellipsefit[filt].sma[good] * smascale, \n ellipsefit[filt].y0[good]-ellipsefit[filt].y0_err[good],\n ellipsefit[filt].y0[good]+ellipsefit[filt].y0_err[good])#,\n #edgecolor='k', lw=2)\n if np.count_nonzero(bad) > 0:\n ax4.scatter(ellipsefit[filt].sma[bad] * smascale, ellipsefit[filt].y0[bad],\n marker='s', s=40, edgecolor='k', lw=2, alpha=0.75)\n #ax4.errorbar(ellipsefit[filt].sma[good] * smascale, ellipsefit[filt].y0[good],\n # ellipsefit[filt].y0_err[good], fmt='o',\n # markersize=4)#, color=color[filt])\n ax4.yaxis.tick_right()\n ax4.yaxis.set_label_position('right')\n ax4.xaxis.set_major_formatter(ScalarFormatter())\n ax4.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n \n ax1.set_ylabel('Ellipticity')\n ax2.set_ylabel('Position Angle (deg)')\n ax3.set_xlabel('Semimajor Axis ({})'.format(smaunit))\n ax3.set_ylabel(r'$x_{0}$')\n ax4.set_xlabel('Semimajor Axis ({})'.format(smaunit))\n ax4.set_ylabel(r'$y_{0}$')\n \n if xlog:\n for xx in (ax1, ax2, ax3, ax4):\n xx.set_xscale('log')\n\n fig.subplots_adjust(hspace=0.05, wspace=0.05, bottom=0.15, right=0.85, left=0.15)\n\n if png:\n print('Writing {}'.format(png))\n fig.savefig(png)\n plt.close(fig)\n else:\n plt.show()\n \ndef display_ellipse_sbprofile(ellipsefit, band=('g', 'r', 'z'), refband='r',\n 
minerr=0.02, redshift=None, pixscale=0.262,\n sersicfit=None, png=None):\n \"\"\"Display the multi-band surface brightness profile.\n\n \"\"\"\n from legacyhalos.util import ellipse_sbprofile\n\n sbprofile = ellipse_sbprofile(ellipsefit, band=band, refband=refband, minerr=minerr,\n redshift=redshift, pixscale=pixscale)\n\n colors = iter(sns.color_palette())\n\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)\n for filt in band:\n\n good = (ellipsefit[filt].stop_code < 4)\n bad = ~good\n\n col = next(colors)\n ax1.fill_between(sbprofile['sma'], \n sbprofile['mu_{}'.format(filt)] - sbprofile['mu_{}_err'.format(filt)],\n sbprofile['mu_{}'.format(filt)] + sbprofile['mu_{}_err'.format(filt)],\n #sbprofile['{}'.format(filt)] - sbprofile['{}_err'.format(filt)],\n #sbprofile['{}'.format(filt)] + sbprofile['{}_err'.format(filt)],\n label=r'${}$'.format(filt), color=col, alpha=0.75, edgecolor='k', lw=2)\n #if np.count_nonzero(bad) > 0:\n # ax1.scatter(sbprofile['sma'][bad], sbprofile[filt][bad], marker='s',\n # s=40, edgecolor='k', lw=2, alpha=0.75)\n\n #ax1.axhline(y=ellipsefit['mu_{}_sky'.format(filt)], color=col, ls='--')\n if filt == refband:\n ysky = ellipsefit['mu_{}_sky'.format(filt)] - 2.5 * np.log10(0.1) # 10% of sky\n ax1.axhline(y=ysky, color=col, ls='--')\n\n # Overplot the best-fitting model.\n if sersicfit:\n from astropy.modeling.models import Sersic1D\n rad = np.arange(0, sbprofile['sma'].max(), 0.1)\n sbmodel = -2.5 * np.log10( Sersic1D.evaluate(\n rad, sersicfit[filt].amplitude, sersicfit[filt].r_eff,\n sersicfit[filt].n) )\n ax1.plot(rad, sbmodel, lw=2, ls='--', alpha=1, color=col)\n \n ax1.set_ylabel(r'Surface Brightness (mag arcsec$^{-2}$)')\n ax1.set_ylim(30, 18)\n\n #ax1.set_ylabel(r'$\\mu$ (mag arcsec$^{-2}$)')\n #ax1.set_ylim(31.99, 18)\n\n ax1.legend(loc='upper right')\n\n ax2.fill_between(sbprofile['sma'],\n sbprofile['rz'] - sbprofile['rz_err'],\n sbprofile['rz'] + sbprofile['rz_err'],\n label=r'$r - z$', color=next(colors), alpha=0.75,\n edgecolor='k', lw=2)\n \n ax2.fill_between(sbprofile['sma'],\n sbprofile['gr'] - sbprofile['gr_err'],\n sbprofile['gr'] + sbprofile['gr_err'],\n label=r'$g - r$', color=next(colors), alpha=0.75,\n edgecolor='k', lw=2)\n\n ax2.set_xlabel('Semimajor Axis ({})'.format(sbprofile['smaunit']), alpha=0.75)\n ax2.set_ylabel('Color (mag)')\n ax2.set_ylim(0, 2.4)\n ax2.legend(loc='upper left')\n\n fig.subplots_adjust(hspace=0.0)\n\n if png:\n print('Writing {}'.format(png))\n fig.savefig(png)\n plt.close(fig)\n else:\n plt.show()\n \ndef display_mge_sbprofile(mgefit, band=('g', 'r', 'z'), refband='r', redshift=None,\n indx=None, pixscale=0.262, png=None):\n \"\"\"Display the multi-band surface brightness profile.\"\"\"\n\n colors = iter(sns.color_palette())\n\n if redshift:\n from astropy.cosmology import WMAP9 as cosmo\n smascale = pixscale / cosmo.arcsec_per_kpc_proper(redshift).value # [kpc/pixel]\n smaunit = 'kpc'\n else:\n smascale = 1.0\n smaunit = 'pixels'\n\n #if indx is None:\n # indx = np.ones_like(mgefit[refband].radius, dtype='bool')\n \n def _sbprofile(mgefit, indx, smascale):\n \"\"\"Convert fluxes to magnitudes and colors.\"\"\"\n sbprofile = dict()\n \n with np.errstate(invalid='ignore'):\n for filt in band:\n #indx = np.ones_like(mgefit[filt].radius, dtype='bool')\n indx = np.argsort(mgefit[filt].radius)\n \n sbprofile['{}_sma'.format(filt)] = mgefit[filt].radius[indx] * smascale\n sbprofile[filt] = 22.5 - 2.5 * np.log10(mgefit[filt].counts[indx])\n sbprofile['{}_err'.format(filt)] = 2.5 * mgefit[filt].err[indx] 
/ mgefit[filt].counts[indx] / np.log(10)\n\n sbprofile['{}_model'.format(filt)] = 22.5 - 2.5 * np.log10(mgefit[filt].yfit[indx])\n\n #sbprofile['{}_err'.format(filt)] = mgefit[filt].int_err[indx] / \\\n # mgefit[filt].intens[indx] / np.log(10)\n\n # Just for the plot use a minimum uncertainty\n #sbprofile['{}_err'.format(filt)][sbprofile['{}_err'.format(filt)] < minerr] = minerr\n\n #sbprofile['gr'] = sbprofile['g'] - sbprofile['r']\n #sbprofile['rz'] = sbprofile['r'] - sbprofile['z']\n #sbprofile['gr_err'] = np.sqrt(sbprofile['g_err']**2 + sbprofile['r_err']**2)\n #sbprofile['rz_err'] = np.sqrt(sbprofile['r_err']**2 + sbprofile['z_err']**2)\n #\n ## Just for the plot use a minimum uncertainty\n #sbprofile['gr_err'][sbprofile['gr_err'] < minerr] = minerr\n #sbprofile['rz_err'][sbprofile['rz_err'] < minerr] = minerr\n\n return sbprofile\n\n sbprofile = _sbprofile(mgefit, indx, smascale)\n\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6), sharex=True)\n for filt in band:\n ax1.scatter(sbprofile['{}_sma'.format(filt)], sbprofile[filt], marker='s',\n label=r'${}$'.format(filt), color=next(colors), alpha=0.75, s=10)\n #ax1.errorbar(sbprofile['{}_sma'.format(filt)], sbprofile[filt], yerr=sbprofile['{}_err'.format(filt)],\n # label=r'${}$'.format(filt), color=next(colors), alpha=0.75, fmt='s',\n # markersize=2)\n #ax1.fill_between(sbprofile['{}_sma'.format(filt)], sbprofile[filt]-sbprofile['{}_err'.format(filt)],\n # sbprofile[filt]+sbprofile['{}_err'.format(filt)],\n # label=r'${}$'.format(filt), color=next(colors), alpha=0.75)\n #ax1.plot(sbprofile['{}_sma'.format(filt)], sbprofile['{}_model'.format(filt)], lw=2, color='k')\n ax1.set_ylabel('AB Magnitude')\n ax1.set_ylim(32.99, 20)\n #ax1.invert_yaxis()\n ax1.legend(loc='upper right')\n\n #ax2.fill_between(sbprofile['sma'], sbprofile['rz']-sbprofile['rz_err'],\n # sbprofile['rz']+sbprofile['rz_err'],\n # label=r'$r - z$', color=next(colors), alpha=0.75)\n #\n #ax2.fill_between(sbprofile['sma'], sbprofile['gr']-sbprofile['gr_err'],\n # sbprofile['gr']+sbprofile['gr_err'],\n # label=r'$g - r$', color=next(colors), alpha=0.75)\n\n ax2.set_xlabel('Semimajor Axis ({})'.format(smaunit), alpha=0.75)\n ax2.set_ylabel('Color')\n ax2.set_ylim(-0.5, 2.5)\n #ax2.legend(loc='upper left')\n\n fig.subplots_adjust(hspace=0.0)\n\n if png:\n print('Writing {}'.format(png))\n fig.savefig(png)\n plt.close(fig)\n else:\n plt.show()\n \ndef sample_trends(sample, htmldir, analysisdir=None, refband='r',\n pixscale=PIXSCALE):\n \"\"\"Trends with the whole sample.\"\"\"\n from astropy.cosmology import WMAP9 as cosmo\n from legacyhalos.io import get_objid\n from legacyhalos.io import read_ellipsefit\n\n trendsdir = os.path.join(htmldir, 'trends')\n if not os.path.isdir(trendsdir):\n os.makedirs(trendsdir, exist_ok=True)\n\n # Ellipticity vs semi-major axis\n png = os.path.join(trendsdir, 'sma_vs_ellipticity.png')\n \n fig, ax1 = plt.subplots()\n for gal in sample:\n objid, objdir = get_objid(gal, analysisdir=analysisdir)\n smascale = pixscale / cosmo.arcsec_per_kpc_proper(gal['z']).value # [kpc/pixel]\n\n ellipsefit = read_ellipsefit(objid, objdir)\n if len(ellipsefit) > 0:\n good = (ellipsefit[refband].stop_code < 4)\n\n ax1.fill_between(ellipsefit[refband].sma[good] * smascale, \n ellipsefit[refband].eps[good]-ellipsefit[refband].ellip_err[good],\n ellipsefit[refband].eps[good]+ellipsefit[refband].ellip_err[good],\n alpha=0.9, color='gray')\n\n ax1.grid('on')\n ax1.set_ylim(0, 0.5)\n ax1.set_ylabel('Ellipticity')\n ax1.set_xlabel('Semimajor Axis (kpc)')\n\n 
fig.subplots_adjust(bottom=0.15, right=0.95, left=0.15, top=0.95)\n\n if png:\n print('Writing {}'.format(png))\n fig.savefig(png)\n plt.close(fig)\n else:\n plt.show()\n\n","sub_path":"legacyhalos/qa.py","file_name":"qa.py","file_ext":"py","file_size_in_byte":17359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"511455588","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n int_or_none,\n parse_age_limit,\n str_or_none,\n try_get,\n unified_strdate,\n unified_timestamp,\n url_or_none,\n)\n\n\nclass Zee5IE(InfoExtractor):\n _VALID_URL = r'''(?x)\n (?:\n zee5:|\n (?:https?://)(?:www\\.)?zee5\\.com/(?:[^#?]+/)?\n (?:\n (?:tvshows|kids|zee5originals)(?:/[^#/?]+){3}\n |movies/[^#/?]+\n )/(?P[^#/?]+)/\n )\n (?P[^#/?]+)/?(?:$|[?#])\n '''\n _TESTS = [{\n 'url': 'https://www.zee5.com/movies/details/krishna-the-birth/0-0-63098',\n 'info_dict': {\n 'id': '0-0-63098',\n 'ext': 'mp4',\n 'display_id': 'krishna-the-birth',\n 'title': 'Krishna - The Birth',\n 'duration': 4368,\n 'average_rating': 4,\n 'description': compat_str,\n 'alt_title': 'Krishna - The Birth',\n 'uploader': 'Zee Entertainment Enterprises Ltd',\n 'release_date': '20060101',\n 'upload_date': '20060101',\n 'timestamp': 1136073600,\n 'thumbnail': 'https://akamaividz.zee5.com/resources/0-0-63098/list/270x152/0063098_list_80888170.jpg',\n 'tags': list\n },\n 'params': {\n 'format': 'bv',\n },\n }, {\n 'url': 'https://zee5.com/tvshows/details/krishna-balram/0-6-1871/episode-1-the-test-of-bramha/0-1-233402',\n 'info_dict': {\n 'id': '0-1-233402',\n 'ext': 'mp4',\n 'display_id': 'episode-1-the-test-of-bramha',\n 'title': 'Episode 1 - The Test Of Bramha',\n 'duration': 1336,\n 'average_rating': 4,\n 'description': compat_str,\n 'alt_title': 'Episode 1 - The Test Of Bramha',\n 'uploader': 'Green Gold',\n 'release_date': '20090101',\n 'upload_date': '20090101',\n 'timestamp': 1230768000,\n 'thumbnail': 'https://akamaividz.zee5.com/resources/0-1-233402/list/270x152/01233402_list.jpg',\n 'series': 'Krishna Balram',\n 'season_number': 1,\n 'episode_number': 1,\n 'tags': list,\n },\n 'params': {\n 'format': 'bv',\n },\n }, {\n 'url': 'https://www.zee5.com/hi/tvshows/details/kundali-bhagya/0-6-366/kundali-bhagya-march-08-2021/0-1-manual_7g9jv1os7730?country=IN',\n 'only_matching': True\n }, {\n 'url': 'https://www.zee5.com/global/hi/tvshows/details/kundali-bhagya/0-6-366/kundali-bhagya-march-08-2021/0-1-manual_7g9jv1os7730',\n 'only_matching': True\n }]\n\n def _real_extract(self, url):\n video_id, display_id = re.match(self._VALID_URL, url).group('id', 'display_id')\n access_token_request = self._download_json(\n 'https://useraction.zee5.com/token/platform_tokens.php?platform_name=web_app',\n video_id, note='Downloading access token')\n token_request = self._download_json(\n 'https://useraction.zee5.com/tokennd',\n video_id, note='Downloading video token')\n json_data = self._download_json(\n 'https://gwapi.zee5.com/content/details/{}?translation=en&country=IN'.format(video_id),\n video_id, headers={'X-Access-Token': access_token_request['token']})\n m3u8_url = try_get(\n json_data,\n (lambda x: x['hls'][0], lambda x: x['video_details']['hls_url']),\n compat_str)\n formats = self._extract_m3u8_formats(\n 'https://zee5vodnd.akamaized.net' + m3u8_url.replace('/drm', '/hls', 1) + token_request['video_token'],\n video_id, fatal=False)\n mpd_url = try_get(\n json_data,\n (lambda 
x: x['video'][0], lambda x: x['video_details']['url']),\n compat_str)\n formats += self._extract_mpd_formats(\n 'https://zee5vod.akamaized.net' + mpd_url,\n video_id, fatal=False)\n\n self._sort_formats(formats)\n return {\n 'id': video_id,\n 'display_id': display_id,\n 'title': json_data['title'],\n 'formats': formats,\n 'duration': int_or_none(json_data.get('duration')),\n 'average_rating': int_or_none(json_data.get('rating')),\n 'description': str_or_none(json_data.get('description')),\n 'alt_title': str_or_none(json_data.get('original_title')),\n 'uploader': str_or_none(json_data.get('content_owner')),\n 'age_limit': parse_age_limit(json_data.get('age_rating')),\n 'release_date': unified_strdate(json_data.get('release_date')),\n 'timestamp': unified_timestamp(json_data.get('release_date')),\n 'thumbnail': url_or_none(json_data.get('image_url')),\n 'series': try_get(json_data, lambda x: x['tvshow_details']['title'], str),\n 'season': try_get(json_data, lambda x: x['season_details']['title'], str),\n 'season_number': int_or_none(try_get(json_data, lambda x: x['season_details']['index'])),\n 'episode_number': int_or_none(try_get(json_data, lambda x: x['index'])),\n 'tags': try_get(json_data, lambda x: x['tags'], list)\n }\n\n\nclass Zee5SeriesIE(InfoExtractor):\n IE_NAME = 'zee5:series'\n _VALID_URL = r'''(?x)\n (?:\n zee5:series:|\n (?:https?://)(?:www\\.)?zee5\\.com/(?:[^#?]+/)?\n (?:tvshows|kids|zee5originals)(?:/[^#/?]+){2}/\n )\n (?P[^#/?]+)/?(?:$|[?#])\n '''\n _TESTS = [{\n 'url': 'https://www.zee5.com/kids/kids-shows/krishna-balram/0-6-1871',\n 'playlist_mincount': 43,\n 'info_dict': {\n 'id': '0-6-1871',\n },\n }, {\n 'url': 'https://www.zee5.com/tvshows/details/bhabi-ji-ghar-par-hai/0-6-199',\n 'playlist_mincount': 1500,\n 'info_dict': {\n 'id': '0-6-199',\n },\n }, {\n 'url': 'https://www.zee5.com/tvshows/details/agent-raghav-crime-branch/0-6-965',\n 'playlist_mincount': 25,\n 'info_dict': {\n 'id': '0-6-965',\n },\n }, {\n 'url': 'https://www.zee5.com/ta/tvshows/details/nagabhairavi/0-6-3201',\n 'playlist_mincount': 3,\n 'info_dict': {\n 'id': '0-6-3201',\n },\n }, {\n 'url': 'https://www.zee5.com/global/hi/tvshows/details/khwaabon-ki-zamin-par/0-6-270',\n 'playlist_mincount': 150,\n 'info_dict': {\n 'id': '0-6-270',\n },\n }\n ]\n\n def _entries(self, show_id):\n access_token_request = self._download_json(\n 'https://useraction.zee5.com/token/platform_tokens.php?platform_name=web_app',\n show_id, note='Downloading access token')\n headers = {\n 'X-Access-Token': access_token_request['token'],\n 'Referer': 'https://www.zee5.com/',\n }\n show_url = 'https://gwapi.zee5.com/content/tvshow/{}?translation=en&country=IN'.format(show_id)\n\n page_num = 0\n show_json = self._download_json(show_url, video_id=show_id, headers=headers)\n for season in show_json.get('seasons') or []:\n season_id = try_get(season, lambda x: x['id'], compat_str)\n next_url = 'https://gwapi.zee5.com/content/tvshow/?season_id={}&type=episode&translation=en&country=IN&on_air=false&asset_subtype=tvshow&page=1&limit=100'.format(season_id)\n while next_url:\n page_num += 1\n episodes_json = self._download_json(\n next_url, video_id=show_id, headers=headers,\n note='Downloading JSON metadata page %d' % page_num)\n for episode in try_get(episodes_json, lambda x: x['episode'], list) or []:\n video_id = episode.get('id')\n yield self.url_result(\n 'zee5:%s' % video_id,\n ie=Zee5IE.ie_key(), video_id=video_id)\n next_url = url_or_none(episodes_json.get('next_episode_api'))\n\n def _real_extract(self, url):\n show_id = 
self._match_id(url)\n return self.playlist_result(self._entries(show_id), playlist_id=show_id)\n","sub_path":"yt_dlp/extractor/zee5.py","file_name":"zee5.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"459407021","text":"import angr\nimport os\nimport claripy\nimport inspect\nimport pickle\nimport re\nimport time\nimport logging\nimport json\nimport argparse\nimport itertools\nfrom glob import glob\n\ndef get_cfg_funcs(proj, binary, excluded):\n \"\"\"\n get functions that are suitable for analysis, (funcs that are defined in the binary and not libc funcs...)\n \"\"\"\n return list(filter(None, [f if f.binary_name == binary and (not f.is_plt) and not f.name.startswith(\n \"sub_\") and not f.name.startswith(\"_\") and f.name not in excluded else None for f in\n proj.kb.functions.values()]))\n\n\ndef analyze_func(proj, fun, cfg):\n print(f\"started running {fun.name}\")\n call_state = proj.factory.call_state(fun.addr, add_options={\n 'CALLLESS': True, 'NO_SYMBOLIC_SYSCALL_RESOLUTION': True, 'TRACK_ACTION_HISTORY': True , 'TRACK_CONSTRAINTS': True\n })\n # dropped the relativization in the last moment due to time consedirations, and we think that the address_breakfun\n\n\n # need to be checked again...\n # call_state.inspect.b('address_concretization', when=angr.BP_AFTER, action=address_breakfun)\n sm = proj.factory.simulation_manager(call_state)\n sm.use_technique(angr.exploration_techniques.LoopSeer(cfg=cfg, bound=2)) \n #print(\"--------------------------------------\")\n sm1=sm\n while (True):\n if hasattr(sm,'deadended') and len(sm.deadended) > 0:\n break\n else:\n sim = sm.step()\n if len(sim.active) > 0:\n curr = sim.active[0]\n print(curr.solver.constraints)\n #print(\"%%%%%%%%%%%%%%%%%%%%%%%%%%\")\n sm1.run()\n return sm1\n\ndef block_to_ins(block: angr.block.Block):\n result = []\n for ins in block.capstone.insns:\n print(ins)\n op_str = ins.op_str\n operands = op_str.strip(\" \").split(\",\")\n operands = [i.strip().replace(\"[\",\"\").replace(\"]\", \"\") for i in operands if i != \"\"]\n parsed_ins = [ins.mnemonic] + list(filter(None, operands))\n result.append(\"|\".join(parsed_ins).replace(\" \", \"|\") + \"|\\t\")\n # result.append(f\"{ins.mnemonic}|{operands[0]}|{operands[1]}\".replace(\" \", \"|\"))\n return \"|\".join(result)\n\ndef run(binary_name):\n proj = angr.Project(binary_name)\n # print(proj.factory.path_group())\n cfg = proj.analyses.CFGFast()\n s = claripy.Solver()\n idfer = proj.analyses.Identifier()\n for funcInfo in idfer.func_info:\n print(funcInfo.name) \n funcs = get_cfg_funcs(proj, binary_name, {})\n for test_func in funcs:\n sm: angr.sim_manager.SimulationManager = analyze_func(proj, test_func, cfg)\n sashes = sm.stashes.values()\n print(\"/******ALL CONSTRAINTS !!***********************/\")\n for exec_paths in sashes:\n for exec_path in exec_paths:\n #print(exec_path)\n #print(\"printing func\")\n #print(test_func)\n #print(\"printing constraint\")\n #print(exec_path.libc)\n #print(exec_path.regs.r16)\n blocks = [proj.factory.block(baddr) for baddr in exec_path.history.bbl_addrs]\n processsed_code = \"|\".join(list(filter(None, map(block_to_ins, blocks))))\n #print(processsed_code)\n print(exec_path.log.actions.gi_frame)\n print(exec_path.log.actions.__class__.__dict__.items())\n for p in exec_path.se.constraints:\n print((p))\n # print(type(exec_paths))\n #print(type(exec_path))\n #print(\"Heyyy /**/\")\n #print(exec_path.solver)\n #print(p)\n\n\nif 
__name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--binary_name\", type=str, required=True)\n args = parser.parse_args()\n run(args.binary_name)\n\n","sub_path":"angr_experiments/angr_script.py","file_name":"angr_script.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"294256575","text":"class Ocean(object):\n\n def __init__(self, rows, columns):\n self.rows = rows\n self.columns = columns\n self.table = [None for i in range(rows)]\n for i in range(rows):\n self.table[i] = input().split(' ')\n\n def CheckCoordinates(self, i, j):\n return i < self.rows and j < self.columns and i >= 0 and j >= 0\n\n def GetNeighbours(self, i, j):\n neighbours = []\n if self.CheckCoordinates(i - 1, j - 1):\n neighbours.append(self.table[i - 1][j - 1])\n if self.CheckCoordinates(i, j - 1):\n neighbours.append(self.table[i][j - 1])\n if self.CheckCoordinates(i + 1, j - 1):\n neighbours.append(self.table[i + 1][j - 1])\n\n if self.CheckCoordinates(i - 1, j):\n neighbours.append(self.table[i - 1][j])\n if self.CheckCoordinates(i + 1, j):\n neighbours.append(self.table[i + 1][j])\n\n if self.CheckCoordinates(i - 1, j + 1):\n neighbours.append(self.table[i - 1][j + 1])\n if self.CheckCoordinates(i, j + 1):\n neighbours.append(self.table[i][j + 1])\n if self.CheckCoordinates(i + 1, j + 1):\n neighbours.append(self.table[i + 1][j + 1])\n return neighbours\n\n def Operate(self):\n new_table = [[None for _ in range(columns)] for i in range(rows)]\n for i in range(rows):\n for j in range(columns):\n new_table[i][j] = self.table[i][j]\n\n for i in range(rows):\n for j in range(columns):\n nb = self.GetNeighbours(i, j)\n if self.table[i][j] == '2' and \\\n (nb.count('2') < 2 or nb.count('2') > 3):\n new_table[i][j] = '0'\n elif self.table[i][j] == '3' and \\\n (nb.count('3') < 2 or nb.count('3') > 3):\n new_table[i][j] = '0'\n elif self.table[i][j] == '0' and nb.count('2') == 3:\n new_table[i][j] = '2'\n elif self.table[i][j] == '0' and nb.count('3') == 3:\n new_table[i][j] = '3'\n\n self.table = new_table.copy()\n\n def PrintTable(self):\n for i in range(self.rows):\n print(' '.join(self.table[i]))\n\n\nnum_to_operate = int(input())\nrc = input().split(' ')\nrows, columns = int(rc[0]), int(rc[1])\na = Ocean(rows, columns)\nfor i in range(num_to_operate):\n a.Operate()\n\na.PrintTable()\n\n","sub_path":"shad-py/Difference in time.py","file_name":"Difference in time.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"510384437","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport html.parser\nimport datetime\n\ndir_name = \"FilesForExtraction/\"\ncurrent_dir = \"Current/\"\npast_dir = \"Past/\"\npattern = re.compile(\"/class='grey_border' alt='(.*?)'\")\n\nclass FileHandler:\n def list_files_by_extensions(dir_name):\n fileList = []\n for file in os.listdir(dir_name):\n dirfile = os.path.join(dir_name, file)\n if os.path.isfile(dirfile):\n fileList.append(file)\n return fileList\n\n def merge_files(self):\n #TODO if filelist\n text = \"\"\n fileList = FileHandler.list_files_by_extensions(self)\n for file_name in fileList:\n text += open(self+file_name, 'r').read() + \"\\n\"\n print(file_name+\"000000000000000000000000000\")\n return text\n\n def str_to_file(csv_lines, file_name):\n with open(file_name, 'w') as csv_file:\n csv_file.truncate()\n csv_file.write(csv_lines)\n\nclass 
draw_scene:\n def __init__(self):\n\n def lines_to_dict(csv_lines):\n dict = {}\n for line in csv_lines.splitlines():\n if line:\n list = line.split(\";\")\n key = list.pop(0)\n if not key in dict:\n values = []\n for value in list:\n if value:\n values.append(value)\n dict.update({key: values})\n return dict\n\n def dict_to_lines(dict):\n number_of_users = 0\n csv_lines = \"\"\n for key, values in dict.items():\n csv_line_values = \"\"\n for value in values:\n csv_line_values += value+\";;\"\n csv_lines += key+\";\"+csv_line_values+\"\\n\"\n number_of_users += 1\n print(\"Number of users: \" + repr(number_of_users) + \"\\n\")\n return csv_lines\n\n users_dict = {}\n pattern = re.compile('(?:div class=\"(?:user-name|username)\".*\\n?.*people/)(\\w*)(?:.*>)(.*)(?:)')\n #pattern = re.compile('(?:div class=\"user-name\".*people/)(\\w*)(?:.*>)(.*)(?:)')\n #Hanna G\n fileList = FileHandler.list_files_by_extensions(dir_name)\n for file_name in fileList:\n text = open(dir_name+file_name, 'r').read()\n matches = re.findall(pattern, text)\n if matches:\n for account, name in matches:\n dict_record = {html.unescape(account): [name, \"https://www.etsy.com/people/\"+account, datetime.date.today().isoformat()]}\n if not html.unescape(name) in users_dict:\n users_dict.update(dict_record)\n\n #write to .csv file\n FileHandler.str_to_file(dict_to_lines(users_dict), 'order_info.csv')\n\n # Compose dictionaries from files in directories\n past_users = lines_to_dict(FileHandler.merge_files(past_dir))\n current_users = lines_to_dict(FileHandler.merge_files(current_dir))\n intersecting = {}\n new = {}\n dropped_out = {} # unfollowed\n merged = past_users\n\n # Intersecting and New\n # TODO Empty\n for key, values in current_users.items():\n if key in past_users:\n intersecting.update({key: values})\n else:\n new.update({key: values})\n\n # Dropped out\n for key, values in past_users.items():\n if not key in intersecting:\n dropped_out.update({key: values})\n\n # Merge all\n merged.update(new)\n\n FileHandler.str_to_file(dict_to_lines(intersecting), 'intersecting.csv')\n FileHandler.str_to_file(dict_to_lines(new), 'new.csv')\n FileHandler.str_to_file(dict_to_lines(dropped_out), 'dropped_out.csv')\n FileHandler.str_to_file(dict_to_lines(merged), 'merged.csv')\n\n print(dict_to_lines(new))\n\ndef main():\n scene = draw_scene()\n\nif __name__ == '__main__':\n main()","sub_path":"ExtractUsers.py","file_name":"ExtractUsers.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"557499291","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# ecabc/bees.py\n# v.2.1.1\n# Developed in 2018 by Sanskriti Sharma & Hernan Gelaf-Romer \n#\n# This program defines the bee objects created in the artificial bee colony\n#\n\nimport numpy as np\nfrom random import randint\n\nimport uuid\n\nclass EmployerBee:\n\n '''\n Class which stores individual employer bee information. 
A probability it will get picked, a score\n    and the values that pertain to the score\n    '''\n    \n    def __init__(self, values=[]): \n        self.values = values \n        self.score = None\n        self.probability = 0\n        self.failed_trials = 0\n        self.id = uuid.uuid4()\n\n    def calculate_probability(self, fitness_average):\n        '''\n        Calculate probability based on a given fitness average\n        '''\n        self.probability = self.score / fitness_average\n\n    def get_fitness_score(self, values, fitness_function):\n        '''\n        Get fitness score for a given set of values\n        '''\n        score = fitness_function(values) \n        if self.score is None or score < self.score:\n            self.values = values\n            self.score = score\n\nclass OnlookerBee:\n    '''\n    Class to store best performing bees, and also\n    calculate positions for any given bees\n    '''\n\n    def __init__(self):\n        self.best_employers = []\n    \n    def calculate_positions(self, first_bee, second_bee, value_types, value_ranges):\n        '''\n        Calculate the positions when merging two bees\n        '''\n        new_values = first_bee.values\n        index = randint(0, len(first_bee.values)-1)\n        min_value = value_ranges[index][1][0]\n        max_value = value_ranges[index][1][1]\n        value = first_bee.values[index] + abs(np.random.uniform(-1, 1) \\\n            * (first_bee.values[index] - second_bee.values[index]))\n        if value_types[index] == 'int': value = int(value)\n        if value > max_value: value = max_value\n        if value < min_value: value = min_value\n        \n        new_values[index] = value\n        return new_values\n","sub_path":"ecabc/bees.py","file_name":"bees.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"76040796","text":"import os\nimport json\nimport time\nimport zipfile\nimport requests\nimport datetime\n\nfrom WebInterface.Connection import Connection\nfrom WebInterface.PullWebData import PullWebData\nfrom WebInterface.DownloadFile import DownloadFile\n\nfrom Reports.ReportManager import ReportManager\n\nfrom DocumentInterface.DocumentInterface import DocumentInterface\n\nfrom .DependencyAnalyzer import DependencyAnalyzer\n\nclass PackageManager:\n\n\t\"\"\"\n\n\t\t1 - PullWebData stores all underwriter data in webData\n\t\t\t\t- This is used to search for web information without needing\n\t\t\t\tto call softpro recurrently \n\n\t\t2 - CompareCSVDataToWebData determines if bundles need an update\n\t\t\t\t- Items requiring updates are stored updatesRequired[]\n\t\t\t\t- Others are added to report manager as not to be duplicated\n\n\t\t3 - RunUpdates() pulls and downloads all packages needing updating.\n\t\t\t\t- The Dependency Updater runs recursively\n\n\t\"\"\"\n\n\tupdatesRequired = []\n\trequiredDependencies = {}\n\n\tdef ClassManager():\n\n\t\tprint(' - Pulling WebData')\n\t\tPullWebData.PullWebData('','')\n\n\t\tprint()\n\t\tprint('Comparing CSV Data for Updates')\n\t\tPackageManager.CompareCSVDataToWebData()\n\n\t\tprint()\n\t\tprint('Running Initial Updates')\n\t\tprint()\n\t\tPackageManager.RunUpdates(False)\n\n\n\t################################################################## Core Function\n\n\tdef CompareCSVDataToWebData():\n\n\t\tfor csvPackage in DocumentInterface.csvData:\n\n\t\t\tnameLower = csvPackage['name'].lower()\n\n\t\t\tPackageManager.GenerateReportItem(nameLower)\n\t\t\tReportManager.report[nameLower]['softProLastUpdated'] = csvPackage['lastUpdated']\n\n\t\t\t# Search webData\n\t\t\t# Remove pkg from list for short list searches?\n\t\t\tfor webPackage in PullWebData.webData['packages']:\n\n\t\t\t\twebLower = 
webPackage['name'].lower()\n\n\t\t\t\tif webLower == nameLower:\n\n\t\t\t\t\t#### Additional Checks for edge cases:\n\t\t\t\t\timportFailureList = ['fanh', 'staz', 'ftut', 'fnwv', 'stoh', 'wfnj']\n\n\t\t\t\t\ttempBlock = []#['fail','clta','cwnj','stnj','stna']\n\t\t\t\t\n\t\t\t\t\tnoDownloads = [ # Downloads different than expected packages whem the package is opened\n\t\t\t\t\t\t'ftnh', \n\t\t\t\t\t\t'ftmi', # FT => FF\n\t\t\t\t\t\t'ftpa', #### Potentail mistake\n\t\t\t\t\t\t'cwmi', # FFMI\n\t\t\t\t\t\t'cwnh', # FFNH / SPlite National / SPliteNewhampshire\n\t\t\t\t\t\t'fnsc' # Release notes only\n\t\t\t\t\t ]\n\n\t\t\t\t\tif webLower in importFailureList or webLower in noDownloads or webLower in tempBlock:\n\t\t\t\t\t\tprint('Passing known package failue: ' + webLower)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#### Edge Cases complete\n\t\t\t\t\t\n\n\t\t\t\t\tReportManager.report[nameLower]['isOnSoftProWebsite'] = True\n\t\t\t\t\tReportManager.report[nameLower]['webLastUpdated'] = webPackage['updatedAt']\n\n\t\t\t\t\tSelectDate = PackageManager.CreateDateObjectFromSelect(csvPackage['lastUpdated'])\n\t\t\t\t\tSPWeb = PackageManager.CreateDateObjectFromSPWeb(webPackage['updatedAt'])\n\n\t\t\t\t\t# Generate a list of packages needing updating\n\t\t\t\t\tif SelectDate < SPWeb or not csvPackage['lastUpdated']:\n\n\t\t\t\t\t\tReportManager.report[nameLower]['isUpdateRequired'] = True\n\t\t\t\t\t\tPackageManager.updatesRequired.append(webPackage)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tReportManager.report[nameLower]['isUpdateRequired'] = False\n\n\t\t\t\telse:\n\t\t\t\t\t# Bundle Not found on Website\n\t\t\t\t\tReportManager.report[nameLower]['isOnSoftProWebsite'] = False\n\n\n\t################################################################## Helper Functions\n\n\tdef GenerateReportItem(nameLower):\n\n\t\treport = {\n\t\t\t\t'bundleName': nameLower,\n\t\t\t\t'softProLastUpdated': None,\n\t\t\t\t'isOnSoftProWebsite': None,\n\t\t\t\t'webLastUpdated':None,\n\t\t\t\t'isUpdateRequired': None,\n\t\t\t\t'isDownloadSuccessFull':None,\n\t\t\t\t'downloadError': None,\n\t\t\t\t'downloadedAsDependency': None,\n\t\t\t\t'downloadNotes':None,\n\t\t\t\t'downloadNotesUnique' : None,\n\t\t\t\t'downloadNotesOrginalNote' : None,\n\t\t\t\t'zipContents':None\n\t\t\t}\n\n\t\tif nameLower not in ReportManager.report:\n\t\t\tReportManager.report[nameLower] = report\n\n\tdef CreateDateObjectFromSelect(date):\n\t\tif date == \"\":\n\t\t\treturn datetime.date(1984,1,1)\n\t\tdate = date.split(\" \")[0]\n\t\tday = date.split('/')[1]\n\t\tmonth = date.split('/')[0]\n\t\tyear = date.split('/')[2]\n\t\treturn datetime.date(int(year),int(month),int(day))\n\n\tdef CreateDateObjectFromSPWeb(date):\n\t\tdate = date.split(\" \")[0]\n\t\tday = date.split('-')[2]\n\t\tmonth = date.split('-')[1]\n\t\tyear = date.split('-')[0]\n\t\treturn datetime.date(int(year),int(month),int(day))\n\n\n\t################################################################## Core Function\n\n\tdef RunUpdates(dependencyState):\n\n\t\tif len(PackageManager.updatesRequired) > 0:\n\n\t\t\tremainingPackages = len(PackageManager.updatesRequired)\n\n\t\t\twhile remainingPackages > 0: # For loop misses iterations without\n\n\t\t\t\tcount = 0\n\t\t\t\tprint()\n\t\t\t\tprint(\"Downloading Package Reequirements\")\n\n\t\t\t\tfor webPackage in PackageManager.updatesRequired:\n\n\t\t\t\t\tcount+=1\n\t\t\t\t\tprint(\" - Downloading: \"+str(count)+\"/\"+str(remainingPackages))\n\n\t\t\t\t\t# Remove Item from 
\n\t\t\t\t\tPackageManager.updatesRequired.remove(webPackage)\n\n\t\t\t\t\t# Ensure Package is instantiated in report\n\t\t\t\t\tnameLower = webPackage['name'].lower()\n\t\t\t\t\tPackageManager.GenerateReportItem(nameLower)\n\n\t\t\t\t\t# Initialize common variables\n\t\t\t\t\twebName = webPackage['name'].lower()\n\t\t\t\t\twebUpdatedAt = webPackage['updatedAt']\n\t\t\t\t\twebDownloadUrl = webPackage['downloadUrl']\n\t\t\t\t\twebDownloadNotes = webPackage['downloadNotes']\n\n\t\t\t\t\t# Report\n\t\t\t\t\tReportManager.report[webName]['downloadedAsDependency'] = dependencyState\n\t\t\t\t\tReportManager.report[webName]['downloadNotes'] = webDownloadNotes\n\t\t\t\t\t\n\t\t\t\t\t# Download current package and store to package state\n\t\t\t\t\tDownloadFile.DownloadFile(webName, webDownloadUrl)\n\n\t\t\t\t\t# Identify additional dependencies\n\t\t\t\t\tif webDownloadNotes is not None:\n\n\t\t\t\t\t\t# Initialize class with 'web notes' parameter and record analyzer output\n\t\t\t\t\t\tdependencyAnalyzerInfo = DependencyAnalyzer(webDownloadNotes)\n\t\t\t\t\t\tdependencyInfo = dependencyAnalyzerInfo.DownloadNotesAnalyzer()\n\n\t\t\t\t\t\t# Report unique, unhandled sentences from the analyzer\n\t\t\t\t\t\t# (written to the fields declared in GenerateReportItem)\n\t\t\t\t\t\tif len(dependencyInfo['unique']) > 0:\n\t\t\t\t\t\t\tReportManager.report[webName]['downloadNotesUnique'] = dependencyInfo['unique']\n\t\t\t\t\t\t\tReportManager.report[webName]['downloadNotesOriginalNote'] = dependencyInfo['unique']\n\n\t\t\t\t\t\t# Store dependencies for updating\n\t\t\t\t\t\tPackageManager.StoreDependencies(dependencyInfo['dependencies'])\n\t\t\n\t\t\t\tremainingPackages = len(PackageManager.updatesRequired)\n\n\t\t\t# Restart Recursion\n\t\t\tPackageManager.RunDependencyUpdates()\n\t\t\tPackageManager.RunUpdates(True)\n\n\n\t################################################################## Helper Functions\n\n\tdef StoreDependencies(dependencies):\n\n\t\tfor dependency in dependencies:\n\t\t\t\t\t\t\t\n\t\t\tcategory = dependency['category']\n\n\t\t\t# Create the category bucket on first use\n\t\t\treqDepCategory = PackageManager.requiredDependencies.setdefault(category, [])\n\n\t\t\tfor package in dependency['packages']:\n\t\t\t\treqDepCategory.append(package.lower()) \n\n\n\tdef RunDependencyUpdates():\n\t\tprint()\n\t\tprint(\"Iterating Dependencies\")\n\n\t\tPackagesAccountedFor = 0\n\t\tUninstalledPackages = 0\n\n\t\t# Ensure the dependency is not already in system\n\t\tfor category,dependencyPackages in PackageManager.requiredDependencies.items():\n\t\t\t\n\t\t\tfor dependencyPackage in dependencyPackages:\n\n\t\t\t\t# count it if already known; otherwise queue it for update\n\t\t\t\tif dependencyPackage in ReportManager.report:\n\t\t\t\t\tPackagesAccountedFor += 1\n\n\t\t\t\telse:\n\t\t\t\t\t# Add the webpackage, not just the name\n\t\t\t\t\twebPackage = PackageManager.GetWebPackage(category, dependencyPackage)\n\n\t\t\t\t\tif webPackage:\n\t\t\t\t\t\tPackageManager.updatesRequired.append(webPackage)\n\t\t\t\t\t\tUninstalledPackages+=1\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(' - webpackage missing')\n\t\t\t\t\t\tprint(dependencyPackage, category)\n\n\t\t# Reset Dependency Storage\n\t\tPackageManager.requiredDependencies = {}\n\n\t\t# Print new count of items and updates\n\t\tprint(\" - - Dependencies Accounted for: \" +str(PackagesAccountedFor) )\n\t\tprint(\" - - New Installations: \" + str(UninstalledPackages))\n\n\n\tdef GetWebPackage(category, dependencyPackage):
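\n\t\t'''Return the web package dict for a dependency name, or False if it is not on the website.'''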
\n\n\t\tfor package in PullWebData.webData['packages']:\n\t\t\tif package['name'].lower() == dependencyPackage.lower():\n\t\t\t\treturn package\n\n\t\treturn False","sub_path":"SoftProBundleManagement/App/PackageManager/PackageManager.py","file_name":"PackageManager.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"610983709","text":"import bme280\nimport os\nfrom microbit import display, Image, button_a, sleep, running_time\n\ncalibrate = 300\nduration = 125000 # logging time in ms\ncountdown = 120\nmax_files = 5\n\nif not button_a.is_pressed():\n\n n = len(os.listdir()) - 1 # 1 indexed\n\n hc = 0.0\n offset = 0.0\n max_height = 0.0\n\n bme = bme280.bme280()\n\n if n > max_files:\n display.scroll(\"Disk Full!\", loop=True) # blocks\n\n display.scroll(\"calibrating \")\n bme.set_qnh(bme.pressure())\n for x in range(calibrate):\n hc = bme.altitude()\n offset += hc\n sleep(10)\n\n data = open(\"flt{}.csv\".format(n), \"w\")\n data.write(\"time,altitude\\n\")\n offset = offset / calibrate # average ground reading over all calibration samples\n x = 0\n y = 0\n delay = running_time()\n\n while True:\n td = running_time() - delay # elapsed ms since logging started\n\n if not td % 500:\n h = bme.altitude() - offset # compensate for ground error\n max_height = h if h > max_height else max_height\n data.write(\"{},{}\\n\".format(td, h))\n \n if not td % 5000:\n display.set_pixel(x, y, 9)\n x = (x + 1) % 5\n if x == 0:\n y = (y + 1) % 5\n \n if td > duration:\n display.show(Image.YES)\n break\n\n data.close()\n sleep(2000)\n display.scroll(\"max height: {:.2f}m flights left: {:d}\".format(max_height, max_files-n), loop=True)\n\nelse:\n display.show(Image.NO)","sub_path":"data/20200614/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"581526474","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Group',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('Name', models.CharField(max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Notice',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('Notice_seq', models.CharField(max_length=50)),\n ('Notice_num', models.CharField(blank=True, max_length=30, null=True)),\n ('Board', models.CharField(max_length=10)),\n ('Date', models.DateTimeField(null=True)),\n ('Title', models.CharField(blank=True, max_length=1000)),\n ('Content', models.TextField(blank=True)),\n ('Notice_type', models.CharField(blank=True, max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='Reply',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('Content', models.CharField(max_length=1000)),\n ('Date', models.DateTimeField(auto_now=True)),\n ('group', models.ForeignKey(to='mjuTogether.Group')),\n ],\n ),\n migrations.CreateModel(\n name='Request',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('Date', models.DateTimeField(auto_now=True)),\n ('group', models.ForeignKey(to='mjuTogether.Group')),\n ],\n ),\n migrations.CreateModel(
\n name='Student',\n fields=[\n ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),\n ('Name', models.CharField(default='default', max_length=100)),\n ('Phone', models.CharField(max_length=20, null=True)),\n ('Major', models.CharField(max_length=100, null=True)),\n ('Interest', models.CharField(max_length=1000, null=True)),\n ('Email', models.CharField(max_length=100, null=True)),\n ('Birth', models.CharField(max_length=100, null=True)),\n ('RID', models.CharField(max_length=200)),\n ],\n ),\n migrations.AddField(\n model_name='request',\n name='student',\n field=models.ForeignKey(to='mjuTogether.Student', null=True),\n ),\n migrations.AddField(\n model_name='reply',\n name='student',\n field=models.ForeignKey(to='mjuTogether.Student', null=True),\n ),\n migrations.AddField(\n model_name='notice',\n name='students',\n field=models.ManyToManyField(to='mjuTogether.Student'),\n ),\n migrations.AddField(\n model_name='group',\n name='notice',\n field=models.ForeignKey(to='mjuTogether.Notice'),\n ),\n migrations.AddField(\n model_name='group',\n name='students',\n field=models.ManyToManyField(to='mjuTogether.Student'),\n ),\n ]\n","sub_path":"mjuTogether/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"106567773","text":"class Car:\n \n def __init__(self, tank = 0, fuel_cons = 1):\n self._tank = tank\n self._fuel_cons = fuel_cons\n \n def ride(self, dist):\n \n if isinstance(dist, Nav):\n dist = dist.dist()\n \n if dist < 0:\n return {'error': 'error'}\n \n res = {\n 'target': False,\n 'distance driven': self._tank / self._fuel_cons,\n 'fuel': 0\n }\n \n if res['distance driven'] >= dist:\n res['target'] = True # destination reached\n res['distance driven'] = dist\n res['fuel'] = self._tank - dist*self._fuel_cons\n\n return res\n\n\nclass Point:\n \n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n def coords(self):\n return self.x, self.y\n\nclass Nav:\n \n def __init__(self, p1, p2, p3):\n self.p1 = p1.coords()\n self.p2 = p2.coords()\n self.p3 = p3.coords()\n print(self.p1)\n \n def dist(self):\n \n # placeholder distance: sums the x-coordinates only;\n # a real route length calculation is not implemented here\n return self.p1[0]+self.p2[0]+self.p3[0]\n\n\npoint1 = Point(0,0)\npoint2 = Point(10,30)\npoint3 = Point(30,40)\n\nmy_nav = Nav(point1, point2, point3)\n\nprint(my_nav.dist())\n\n\ncar = Car(100, 2)\n\nprint(car.ride(my_nav))\nprint(car.ride(12345))\n","sub_path":"Class 10/Materials/ex.py","file_name":"ex.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"7283528","text":"import random\nimport time\n\n\nclass QuickSort(object):\n def sortion(self, a, low, high):\n i = low\n j = high\n mid = a[low + (high - low) // 2]\n while i <= j:\n while a[i] < mid:\n i += 1\n while a[j] > mid:\n j -= 1\n if i <= j:\n a[i], a[j] = a[j], a[i]\n i += 1\n j -= 1\n if low < j:\n self.sortion(a, low, j)\n if i < high:\n self.sortion(a, i, high)\n\n def printList(self, a):\n print(a)\n\n\nif __name__ == \"__main__\":\n glist = []\n low = 0\n launch = QuickSort()\n # counter = 0\n while True:\n try:\n col = int(input(\"Enter the number of elements to sort: \"))\n # testcol = int(input(\"Enter the number of test runs: \"))\n except ValueError:\n print(\"You must enter an integer\")\n else:\n # while counter< testcol:\n for x in range(col):\n glist.append(x)\n random.shuffle(glist)\n high = len(glist) - 1\n print(\"Generated list: \", glist)\n 
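# note: printList below runs inside the timed window, so console I/O\n # inflates the reported sort time\n 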
print(\"Отсортированный список: \", end=\" \")\n start = time.time()\n launch.sortion(glist, low, high)\n launch.printList(glist)\n end = time.time()\n clock = round((end - start), 3)\n print(\"\\nВремя работы быстрой сортировки %r seconds\" % clock)\n # counter+=1\n # glist = []\n break\n","sub_path":"QuickSort/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"34501920","text":"import os, sys, argparse, re\nfrom utils import * \nimport pandas as pd\nimport numpy as np\nfrom matplotlib import ticker\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--project', type=str, default='ARCADIA25um_surfaceDamage', help='Define patht to project.')\nparser.add_argument('--writeCSV', action='store_true', help='Convert tcl to csv, if not already done.')\nparser.add_argument('-threeD', '--threeD', action='store_true', help='3D measurement, assumes only particle transient measurement at the moment.')\nparser.add_argument('-m', '--measure', action='append', type=str, default=[], help='Define which plots you want to draw.', choices=['cv','iv','iv_p','iv_b','cv_b','tran','tran_3','tran_4','charge'])\nparser.add_argument('--log', action='store_true', help='Define if y axis on log scale.')\nparser.add_argument('--fit', action='store_true', help='Define if cv curve is fitted.')\nparser.add_argument('--fit_minX', type=int, help='Set fit range minimum.')\nparser.add_argument('--fit_maxX', type=int, help='Set fit range maximum.') \nparser.add_argument('--free', action='store_true', help='Free y range.')\nparser.add_argument('--maxX', type=int, help='Set draw range maximum.')\nparser.add_argument('-numP', '--Parameters', type=int, default=2, help='Define how many parameters are tested.')\nparser.add_argument('-th', '--thickness', type=int, default=100, help='Define silicon thickness for CCE.', required= 'tran_4' in sys.argv or 'tran' in sys.argv or 'tran_3' in sys.argv)\nparser.add_argument('--LET', type=float, default=1.28, help='Set the LET value for CCE calculation.', required= 'tran_4' in sys.argv or 'tran_3' in sys.argv)\nparser.add_argument('--scaleLET', type=int, default=1, help='Scaling of the LET for CCE.', required= 'tran_4' in sys.argv or 'tran_3' in sys.argv)\nparser.add_argument('--drawMap', action='store_true', help='Print out CCE per pixel in map.')\nparser.add_argument('-out', '--output', type=str, default='_', help='Define output file name..')\nparser.add_argument('-p1', '--par1', action='append', default=[], help='Fill arrays with parameter value.')\nparser.add_argument('-p2', '--par2', action='append', default=[], help='Fill arrays with parameter value.')\nparser.add_argument('-p3', '--par3', action='append', default=[], help='Fill arrays with parameter value.')\nparser.add_argument('-p4', '--par4', action='append', default=[], help='Fill arrays with parameter value.')\nparser.add_argument('-p5', '--par5', action='append', default=[], help='Fill arrays with parameter value.')\nparser.add_argument('-p1Name', '--par1Name', type=str, default=\"par1\", help='Set parameter name.')\nparser.add_argument('-p2Name', '--par2Name', type=str, default=\"par2\", help='Set parameter name.')\nparser.add_argument('-p3Name', '--par3Name', type=str, default=\"par3\", help='Set parameter name.')\nparser.add_argument('-p4Name', '--par4Name', type=str, default=\"par4\", help='Set parameter name.')\nparser.add_argument('-p5Name', '--par5Name', type=str, default=\"par5\", 
\n\nargs,_=parser.parse_known_args()\n\nthreeDim = args.threeD\nxmax = args.maxX\nif threeDim:\n print('Running on 3D simulation..')\nthickness = args.thickness\nnumP=args.Parameters\nif args.log:\n args.output += 'log_'\n\ntitles = dict([('cv', 'C [F/$\\mu$m]'),\n ('iv', 'I [A/$\\mu$m]'),\n ('iv_p', 'I$_{ptop}$ [A/$\\mu$m]'),\n ('iv_b', 'I$_{back}$ [A/$\\mu$m]'),\n ('cv_b', 'C$_{back}$ [F/$\\mu$m]'),\n ('tran', 'I$_{front}$ [$\\mu$A]'),\n ('tran_3', 'I$_{front}$ [$\\mu$A]'),\n ('tran_4', 'I$_{front}$ [$\\mu$A]'),\n ('charge', 'charge [pC]')\n ])\n\nranges = dict([('cv', [2e-16, 5e-15]),\n ('iv', [7e-17, 5e-13]),\n ('iv_p', [1e-20, 1e-5]),\n ('iv_b', [1e-16, 1e-5]),\n ('cv_b', [1e-16, 5e-15]),\n ('tran', [0, 1.2]),\n ('tran_3', [0, 0.2]),\n ('tran_4', [0, 0.2]),\n ('charge', [0, 1.2])\n ])\n\nif threeDim:\n ranges['cv']=[0, 2e-14]\n titles['cv']='C [F]'\n titles['iv']='I [A]'\n\n# write parameter permutations.. \narrayParPerm=[]\narrayParPermName=[]\nlegTitle=args.par1Name\nfor p1 in args.par1:\n parOption='-pS '+p1\n parPermName=p1\n # check if more than one par is set\n if len(args.par2)>0:\n legTitle=args.par1Name+'_'+args.par2Name\n numP=2\n for p2 in args.par2:\n parOption='-pS '+p1+' -pS '+p2\n parPermName=p1+'_'+p2\n if len(args.par3)>0:\n legTitle=args.par1Name+'_'+args.par2Name+'_'+args.par3Name\n numP=3\n for p3 in args.par3:\n parOption='-pS '+p1+' -pS '+p2+' -pS '+p3\n parPermName=p1+'_'+p2+'_'+p3\n if len(args.par4)>0:\n legTitle=args.par1Name+'_'+args.par2Name+'_'+args.par3Name+'_'+args.par4Name\n numP=4\n for p4 in args.par4:\n parOption='-pS '+p1+' -pS '+p2+' -pS '+p3+' -pS '+p4\n parPermName=p1+'_'+p2+'_'+p3+'_'+p4\n if len(args.par5)>0:\n legTitle=args.par1Name+'_'+args.par2Name+'_'+args.par3Name+'_'+args.par4Name+'_'+args.par5Name\n numP=5\n for p5 in args.par5:\n parOption='-pS '+p1+' -pS '+p2+' -pS '+p3+' -pS '+p4+' -pS '+p5\n parPermName=p1+'_'+p2+'_'+p3+'_'+p4+'_'+p5\n print(parOption)\n arrayParPerm.append(parOption)\n arrayParPermName.append(parPermName)\n else:\n print(parOption)\n arrayParPerm.append(parOption)\n arrayParPermName.append(parPermName)\n else:\n print(parOption)\n arrayParPerm.append(parOption)\n arrayParPermName.append(parPermName)\n else:\n print(parOption)\n arrayParPerm.append(parOption)\n arrayParPermName.append(parPermName)\n else:\n print(parOption)\n arrayParPerm.append(parOption)\n arrayParPermName.append(parPermName)
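\n\n# Note: the nested loops above could be collapsed with itertools.product; a sketch\n# (kept disabled to preserve the original control flow; legTitle/numP handling\n# would still have to be ported):\n# from itertools import product\n# parLists = [p for p in (args.par1, args.par2, args.par3, args.par4, args.par5) if p]\n# for combo in product(*parLists):\n# arrayParPerm.append(' '.join('-pS '+p for p in combo))\n# arrayParPermName.append('_'.join(combo))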
\n\n# produce csv files\nif args.writeCSV:\n for parOption in arrayParPerm:\n print(parOption)\n if len(args.measure)>1:\n for im,m in enumerate(args.measure):\n if m=='cv':\n # write and run tcl files to store csv files in tmp/\n os.system('python3 python/writeTcl.py --project '+str(args.project)+' --'+str(m)+' '+parOption+' --run')\n else:\n # write and run tcl files to store csv files in tmp/\n os.system('python3 python/writeTcl.py --project '+str(args.project)+' --'+str(args.measure[0])+' '+parOption+' --run')\n \n# prepare for drawing\nmark=','\nlines=['-','--',':','-.']\n\n# create canvas\nprint(len(args.measure))\nfig, axs = plt.subplots(len(args.measure),1, sharex=True, sharey=False) #, gridspec_kw={'hspace': 0})\n# need two canvas' to include CCE\nif args.measure[0]=='tran' or args.measure[0]=='tran_4' or args.measure[0]=='tran_3':\n fig, axs = plt.subplots(2,1, sharex=True, sharey=False)\n\n# pick color map\ncolormap = plt.cm.nipy_spectral \ncolors = [colormap(i) for i in np.linspace(0, 0.9,len(arrayParPermName))]\n\n# arrays of Vdpl and Cend\nptVs=np.zeros(len(arrayParPermName), dtype=float)\nptIs=np.zeros(len(arrayParPermName), dtype=float)\ndeplVs=np.zeros(len(arrayParPermName), dtype=float)\ncapCs=np.zeros(len(arrayParPermName), dtype=float)\nendIs=np.zeros(len(arrayParPermName), dtype=float)\nCCEs1=[[0 for x in range(len(arrayParPermName))] for y in range(100)] \nCCEs2=[[0 for x in range(len(arrayParPermName))] for y in range(100)] \nCCEs3=[[0 for x in range(len(arrayParPermName))] for y in range(100)] \nCCEs4=[[0 for x in range(len(arrayParPermName))] for y in range(100)] \ntimes=[[0 for x in range(len(arrayParPermName))] for y in range(100)]\n\n# read csv files and analyse\nfor i,perm in enumerate(arrayParPermName):\n\n f1=csvFileName(args.project, args.measure[0], perm)\n print(f1)\n data1 = pd.read_csv(f1, names=[\"X\",\"Y\",\"X1\",\"Y1\",\"X2\",\"Y2\",\"X3\",\"Y3\"], skiprows=1)\n lab=str(perm)\n\n # if multiple measurements are analysed draw in multiple subplots\n if len(args.measure)>1:\n for im,m in enumerate(args.measure):\n f2=csvFileName(args.project, m, perm)\n data2 = pd.read_csv(f2, names=[\"X\",\"Y\"], skiprows=1)\n \n drawGraphLines(axs[im],abs(data2.X), abs(data2.Y),colors[i],lines[0],lab)\n axs[im].set_ylabel(titles[m])\n if not args.free:\n axs[im].set_ylim(ranges[m][0],ranges[m][1])\n if args.log:\n axs[im].set_yscale('log')\n if args.maxX:\n axs[im].set_xlim(-2, xmax)\n \n # if measurement is iv curve, fit and extract the punch through voltage\n if m=='iv_b':\n # set maximum current to be fitted for determining punch through.. keep low for set-off\n ptV,Ipt=punchThrough(axs[im],abs(data2.X),abs(data2.Y),1e-13,colors[i])\n print('#############################')\n print( perm )\n print('Punch-through voltage found to be: {:.1f} V'.format(ptV))\n print('Current at punch through voltage: {:.4f} uA'.format(Ipt*pow(10,6)))\n print('#############################')\n ptVs[i]=ptV\n ptIs[i]=Ipt\n\n elif m=='iv_p':\n # set maximum current to be fitted for determining punch through.. keep low for set-off
\n newDat1x=np.array(abs(data2.X[ abs(data2.X) > float(3) ]))\n newDat1y=np.array(abs(data2.Y[ abs(data2.X) > float(3) ]))\n indMin=np.where(newDat1y == newDat1y.min())\n ptV = newDat1x[int(indMin[0])]\n Ipt = newDat1y[int(indMin[0])]\n drawVoltageLine(axs[im],ptV,colors[i])\n print('#############################')\n print( perm )\n print('Punch-through voltage found to be: {:.1f} V'.format(ptV))\n print('Current at punch through voltage: {:.4f} uA'.format(Ipt*pow(10,6)))\n print('#############################')\n ptVs[i]=ptV\n ptIs[i]=Ipt\n\n # if measurement is cv curve, fit and extract the depletion voltage \n elif m=='cv' and args.fit:\n if args.fit_maxX or args.fit_minX:\n deplV,deplC=deplVoltageRange(axs[im],data2,args.fit_minX, args.fit_maxX, colors[i])\n else:\n deplV,deplC=deplVoltage(axs[im],data2,colors[i])\n print('#############################')\n print( perm )\n print('Depletion voltage found to be: {:.1f} V'.format(deplV))\n print('Capacitance at depletion voltage: {:.4f} fC'.format(deplC*pow(10,15)))\n print('#############################')\n deplVs[i]=deplV\n capCs[i]=deplC\n\n elif m=='cv_b' and args.fit:\n deplV,deplC=deplVoltageRange(axs[im],data2,args.fit_minX,args.fit_maxX,colors[i])\n print('#############################')\n print( perm )\n print('Depletion voltage found to be: {:.1f} V'.format(deplV))\n print('Capacitance at depletion voltage: {:.4f} fC'.format(deplC*pow(10,15)))\n print('#############################')\n deplVs[i]=deplV\n capCs[i]=deplC\n \n for im,m in enumerate(args.measure):\n f2=csvFileName(args.project, m, perm)\n data2 = pd.read_csv(f2, names=[\"X\",\"Y\"], skiprows=1)\n # check if punch-through/depletion voltage is determined\n if (m=='iv' or m=='iv_b' or m=='iv_p') and find_element_in_list('cv',args.measure) and args.fit:\n xtmp=np.array(abs(data2.X))\n # select the voltage bin within +-0.5 V of the depletion voltage\n vbin=np.where((xtmp<deplVs[i]+0.5) & (xtmp>deplVs[i]-0.5))\n ytmp=np.array(abs(data2.Y))\n idpl=float(ytmp[vbin])\n print('Current at depletion voltage: {:.4f} pA'.format(idpl*pow(10,12)))\n print('#############################')\n drawVoltagePoint(axs[im],deplVs[i],idpl,colors[i])\n \n elif m=='cv' and ( find_element_in_list('iv_p',args.measure) or find_element_in_list('iv_b',args.measure) ) and args.fit:\n print('#############################')\n print('Draw punch-through.. 
', ptVs[i])\n drawVoltageLine(axs[im],ptVs[i],colors[i])\n\n \n # if only one measurement, draw in one canvas\n else:\n if args.measure[0]=='tran' or args.measure[0]=='tran_3' or args.measure[0]=='tran_4':\n if args.measure[0]=='tran':\n drawGraphLines(axs[0],data1.X*pow(10,9), data1.Y*pow(10,6),colors[i],lines[0],lab)\n elif args.measure[0]=='tran_3':\n draw3MultiGraphLines(axs[0],data1.X*pow(10,9), data1.Y*pow(10,6), data1.Y1*pow(10,6), data1.Y2*pow(10,6), lines[0])\n elif args.measure[0]=='tran_4':\n draw4MultiGraphLines(axs[0],data1.X*pow(10,9), data1.Y*pow(10,6), data1.Y1*pow(10,6), data1.Y2*pow(10,6), data1.Y3*pow(10,6),lines[0])\n #set x range\n axs[0].set_xlim(-2, axs[0].get_xlim()[1])\n if args.maxX:\n axs[1].set_xlim(-2, xmax)\n axs[0].set_xlim(-2, xmax)\n\n # fill array with time bins for hit maps\n times[i]=data1.X*pow(10,9)\n\n # integrate current over 200ns\n # C=A*s\n totalCharge=abs(getIntegral(data1.Y,data1.X)* pow(10,12)) #,0,10*pow(10,-9)) * pow(10,12)\n # search for the LET value in string \n # transform from pC/um to pC\n print(\"Found LET of: \", args.LET)\n final_let=float( args.LET ) * pow(10,-5) * thickness\n # print(\"Convert to LET*thickness: \", final_let)\n # normalise to 1/4 when running in 4 pixel domain, assumes particle in 0,0\n if args.scaleLET != 1:\n final_let = final_let / float(args.scaleLET)\n cce = totalCharge/final_let\n print('#############################')\n print( perm )\n print('Total charge : {:.4f}x10^-5 pC'.format(totalCharge*pow(10,5)))\n print('LET : {:.4f}x10^-5 pC'.format(final_let*pow(10,5)))\n print('Charge collection efficiency: {:.1f} %'.format(cce*100))\n print('#############################')\n \n # draw CCE over time\n if args.measure[0]=='tran':\n CCEs1[i] = drawCCE(axs[1],data1.X,data1.Y,final_let,colors[i],lines[0],lab)\n axs[1].set_ylabel('CCE')\n\n # draw CCEs over time and return 3 arrays\n elif args.measure[0]=='tran_3':\n CCEs1[i], CCEs2[i], CCEs3[i] = draw3MultiCCE(axs[1],data1.X,data1.Y,data1.Y1,data1.Y2,final_let,lines[0])\n axs[1].set_ylabel(r'CCE $\\times$ '+str(args.scaleLET))\n axs[0].set_xlim(-1,10) #lorenzo\n \n elif args.measure[0]=='tran_4':\n CCEs1[i], CCEs2[i], CCEs3[i], CCEs4[i] = draw4MultiCCE(axs[1],data1.X,data1.Y,data1.Y1,data1.Y2,data1.Y3,final_let,lines[0])\n axs[1].set_ylabel(r'CCE $\\times$ '+str(args.scaleLET))\n \n time95 = getTime(times[i],CCEs1[i],95)\n time99 = getTime(times[i],CCEs1[i],99)\n print('Time of 95% collection: {:.1f} ns'.format(time95))\n print('Time of 99% collection: {:.1f} ns'.format(time99))\n print('#############################')\n\n axs[1].set_xlabel('time [ns]')\n startY=0.05\n deltaY=0.2*i\n if args.log:\n startY=1E-6\n deltaY=pow(10,-6+i)\n axs[1].text( axs[0].get_xlim()[1]/2., startY+deltaY, '$t_{95}=$'+str(\"{:.1f}ns\").format(time95)+', $t_{99}=$'+str(\"{:.1f}ns\").format(time99), color=colors[i])\n axs[0].set_ylabel(titles['tran'])\n\n if not args.free:\n axs[1].set_ylim(0,1.2)\n\n if args.log:\n axs[0].set_yscale('log')\n axs[1].set_yscale('log')\n\n axs[1].set_xticks(np.arange(0, int(axs[1].get_xlim()[1]), int(axs[1].get_xlim()[1]/10.) 
))\n plt.grid(True)\n\n #axs[1].set_ylim(10e-5,2) #lorenzo\n\n elif args.measure[0]=='charge':\n drawGraphLines(axs,data1.X*pow(10,9), data1.Y*pow(10,12),colors[i],lines[0],lab)\n axs.set_xlabel('time [ns]')\n axs.set_xlim(-2,50)\n axs.set_ylabel(titles[args.measure[0]])\n if not args.free:\n axs.set_ylim(ranges[args.measure[0]][0],ranges[args.measure[0]][1])\n\n else:\n dat1x=abs(data1.X)\n dat1y=abs(data1.Y)\n\n if args.measure[0]=='iv_b' or args.measure[0]=='iv_p' or args.measure[0]=='iv':\n drawGraphLines(axs,dat1x,dat1y,colors[i],lines[0],lab)\n # if measure current at back, determine punch through voltage\n if args.measure[0]=='iv_b' and args.fit:\n # set maximum current to be fitted for detemining punch through.. keep low for set-off\n ptV,Ipt=punchThrough(axs,dat1x,dat1y,1e-14,colors[i])\n print('#############################')\n print( perm )\n print('Punch-through voltage found to be: {:.1f} V'.format(ptV))\n print('Current at punch through voltage: {:.4f} uA'.format(Ipt*pow(10,6)))\n print('#############################')\n\n # if measure current at p well, determine punch through voltage\n elif args.measure[0]=='iv_p' and args.fit:\n # set maximum current to be fitted for detemining punch through.. keep low for set-off \n newDat1x=np.array(dat1x[ dat1x > float(3) ])\n newDat1y=np.array(dat1y[ dat1x > float(3) ])\n indMin=np.where(newDat1y == newDat1y.min())\n ptV = newDat1x[int(indMin[0])]\n Ipt = newDat1y[int(indMin[0])]\n drawVoltageLine(axs,ptV,colors[i])\n\n print('#############################')\n print( perm )\n print('Punch-through voltage found to be: {:.1f} V'.format(ptV))\n print('Current at punch through voltage: {:.4f} uA'.format(Ipt*pow(10,6)))\n print('#############################')\n else:\n drawGraphLines(axs,abs(data1.X), data1.Y,colors[i],lines[0],lab)\n axs.set_xlabel('|V|')\n axs.set_ylabel(titles[args.measure[0]])\n if not args.free:\n axs.set_ylim(ranges[args.measure[0]][0],ranges[args.measure[0]][1])\n if args.log:\n axs.set_yscale('log')\n # if measurement is cv curve, fit and extract the depletion voltage\n if args.measure[0]=='cv' and args.fit:\n if args.fit_maxX or args.fit_minX:\n deplV,deplC=deplVoltageRange(axs,data1,args.fit_minX, args.fit_maxX, colors[i])\n else:\n deplV,deplC=deplVoltage(axs,data1,colors[i])\n print('#############################')\n print( perm )\n print('Depletion voltage found to be: {:.1f} V'.format(deplV))\n print('Capacitance at depletion voltage: {:.4f} fC'.format(deplC*pow(10,15)))\n print('#############################')\n deplVs[i]=deplV\n capCs[i]=deplC\n \n elif args.measure[0]=='cv_b' and args.fit:\n deplV,deplC=deplVoltageRange(axs,data1,args.fit_minX,args.fit_maxX,colors[i])\n print('#############################')\n print( perm )\n print('Depletion voltage found to be: {:.1f} V'.format(deplV))\n print('Capacitance at depletion voltage: {:.4f} fC'.format(deplC*pow(10,15)))\n print('#############################')\n deplVs[i]=deplV\n capCs[i]=deplC\n\n \n# draw all measured curves\nallM=''\nfor m in args.measure:\n allM=allM+m\noutName=allCurvesName(args.project, allM, args.output, 'pdf')\nprint(outName)\n\nif len(args.measure)>1 or args.measure[0]=='tran_4' or args.measure[0]=='tran_3' or args.measure[0]=='tran':\n if args.measure[0]=='tran_4' or args.measure[0]=='tran_3' or args.measure[0]=='tran':\n plt.subplots_adjust(top=0.8)\n colmn=4\n if len(arrayParPermName)>1:\n colmn=int(len(arrayParPermName)/2.)\n else:\n legTitle=legTitle+'\\n'+arrayParPermName[0]\n\n axs[0].legend(loc='upper center', 
bbox_to_anchor=(.5, 1.5), fancybox=True, ncol=colmn, title=legTitle)\n else:\n axs[0].legend(title=legTitle, loc='upper left', bbox_to_anchor=(1, 0.5), fancybox=True)\n axs[len(args.measure)-1].set_xlabel('|V|')\nelse:\n axs.legend(title=legTitle, loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)\nif not args.measure[0]=='tran_4' and not args.measure[0]=='tran_3' and not args.measure[0]=='tran':\n if numP>3:\n plt.subplots_adjust(right=0.5)\n else:\n plt.subplots_adjust(right=0.65)\n# print out \nfig.savefig(outName)\n\n\n# to be moved outside\n# draw hit maps for tran_4 measurements\nif args.measure[0]=='tran_4' and args.drawMap:\n timeBin=math.floor(len(times[0])/10.)\n # sample time in 10 \n for t in range(1,10):\n timeValue=int(0)\n realTime=t*timeBin\n # for last time bin, use the highest entry\n if t==9:\n timeValue=int(times[0][len(times[0])-1])\n else:\n timeValue=int(times[0][realTime])\n\n plotOutName=allCurvesName(args.project, 'CCE_map_'+str(timeValue)+'ns_'+allM, args.output, 'pdf')\n print(plotOutName)\n\n matrix_X = 0\n matrix_Y = 0\n # draw map in 4x4 if --scaleLET 4\n if args.scaleLET==4:\n matrix_X = 4\n matrix_Y = 4\n arrX=[1,0,0,1]\n arrY=[1,0,0,1]\n arrXYZ=[[0 for x in range(4)] for y in range(4)]\n for pixX in range(0,4):\n for pixY in range(0,4):\n weight=0\n if (pixX==0 and pixY==0) or (pixX==3 and pixY==3) or (pixX==0 and pixY==3) or (pixX==3 and pixY==0):\n weight=CCEs4[0][realTime]\n elif (pixX==1 or pixX==2) and (pixY==2 or pixY==1):\n weight=CCEs1[0][realTime]\n else:\n weight=CCEs2[0][realTime]\n #print(pixX,pixY)\n #print(weight)\n arrXYZ[pixY][pixX]=weight*100./4.\n\n # draw map in 3x4 if --scaleLET 2\n if args.scaleLET==2:\n matrix_X = 4\n matrix_Y = 3\n arrX=[1,0,0,1]\n arrY=[1,0,1]\n arrXYZ=[[0 for x in range(4)] for y in range(3)]\n for pixX in range(0,4):\n for pixY in range(0,3):\n weight=0\n if (pixX==1 or pixX==2) and pixY==1:\n weight=CCEs1[0][realTime]\n elif (pixX==0 and pixY==0) or (pixX==3 and pixY==2) or (pixX==0 and pixY==2) or (pixX==3 and pixY==0):\n weight=CCEs4[0][realTime]\n elif (pixX==0 and pixY==1) or (pixX==3 and pixY==1):\n weight=CCEs3[0][realTime]\n else:\n weight=CCEs2[0][realTime]\n arrXYZ[pixY][pixX]=weight*100./2.\n\n # draw map in 3x3 if --scaleLET 1\n if args.scaleLET==1:\n matrix_X = 3\n matrix_Y = 3\n arrX=[1,0,1]\n arrY=[1,0,1]\n arrXYZ=[[0 for x in range(3)] for y in range(3)]\n for pixX in range(0,3):\n for pixY in range(0,3):\n weight=0\n if (pixX==1 and pixY==1): # or (pixX==3 and pixY==3) or (pixX==0 and pixY==3) or (pixX==3 and pixY==0):\n weight=CCEs1[0][realTime]\n elif (pixX==1 or pixY==1):\n weight=CCEs2[0][realTime]\n else:\n weight=CCEs4[0][realTime]\n arrXYZ[pixY][pixX]=weight*100.\n\n fig, ax = plt.subplots()\n im = ax.imshow(arrXYZ, cmap='viridis', vmin=0, vmax=int(120/float(args.scaleLET)))\n ax.set_xticks(np.arange(matrix_X))\n ax.set_yticks(np.arange(matrix_Y))\n # ... and label them with the respective list entries
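\n # (the labels give the pixel offset from the central impact pixel, 0 = centre)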
\n ax.set_xticklabels(arrX)\n ax.set_yticklabels(arrY)\n #ax.imshow(arrXYZ)\n ax.set_xticks(np.arange(matrix_X+1)-.5, minor=True)\n ax.set_yticks(np.arange(matrix_Y+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n # write values in pixel\n for pixX in range(0,matrix_X):\n for pixY in range(0,matrix_Y):\n text = ax.text(pixX, pixY, \"{0:.1f}\".format(arrXYZ[pixY][pixX]),\n ha=\"center\", va=\"center\", color=\"w\")\n \n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, cmap=\"viridis\")\n cbar.ax.set_ylabel('CCE [%] $\\Delta$t='+str(timeValue)+'ns', rotation=-90, va=\"bottom\")\n # add impinging point\n x = matrix_X/2. - 0.5 #math.floor(matrix/2.)\n y = matrix_Y/2. - 0.5 #math.floor(matrix/2.)\n # print(x,y)\n ax.scatter(x,y,color='r')\n fig.tight_layout()\n fig.savefig(plotOutName)\n\n\n\n\n\n# draw hit maps for tran_3 measurements\nif args.measure[0]=='tran_3' and args.drawMap:\n timeBin=math.floor(len(times[0])/10.)\n # sample time in 10\n \n for t in range(1,10):\n timeValue=int(0)\n realTime=t*timeBin\n # for last time bin, use the highest entry\n if t==9:\n timeValue=int(times[0][len(times[0])-1])\n else:\n timeValue=int(times[0][realTime])\n \n plotOutName=allCurvesName(args.project, 'CCE_map_'+str(timeValue)+'ns_'+allM, args.output, 'pdf')\n print(plotOutName)\n \n matrix = 0\n # draw map as a 1x5 strip if --scaleLET 4\n if args.scaleLET==4:\n matrix = 5\n arrX=[20,10,0,10,20]\n arrY=[0]\n arrXYZ=[[0 for x in range(0,5)] for y in range(0,1)]\n #print(arrXYZ)\n for pixX in range(0,5):\n for pixY in range(0,1):\n weight=0\n if (pixX==0 and pixY==0) or (pixX==4 and pixY==0):\n weight=CCEs3[0][realTime]\n elif (pixX==1 or pixX==3) and (pixY==0):\n weight=CCEs2[0][realTime]\n else:\n weight=CCEs1[0][realTime]\n #print(pixX,pixY)\n #print(weight)\n arrXYZ[pixY][pixX]=weight*100./4.\n #print(arrXYZ)\n #print(len(arrXYZ))\n \n # draw map as a 1x5 strip if --scaleLET 1\n if args.scaleLET==1:\n matrix = 5\n arrX=[20,10,0,10,20]\n arrY=[0]\n arrXYZ=[[0 for x in range(0,5)] for y in range(0,1)]\n #print(arrXYZ)\n for pixX in range(0,5):\n for pixY in range(0,1):\n weight=0\n if (pixX==0 and pixY==0) or (pixX==4 and pixY==0):\n weight=CCEs1[0][realTime]\n elif (pixX==1 or pixX==3) and (pixY==0):\n weight=CCEs2[0][realTime]\n else:\n weight=CCEs3[0][realTime]\n #print(pixX,pixY)\n #print(weight)\n arrXYZ[pixY][pixX]=weight*100.\n #print(arrXYZ)\n #print(len(arrXYZ))\n\n fig, ax = plt.subplots()\n im = ax.imshow(arrXYZ, cmap='viridis', vmin=0, vmax=int(120/float(args.scaleLET)), aspect=5)\n ax.set_xticks(np.arange(5)) #np.arange(matrix)\n ax.set_yticks(np.arange(1)) #np.arange(matrix)\n # ... and label them with the respective list entries\n ax.set_xticklabels(arrX)\n ax.set_yticklabels(arrY)\n #ax.imshow(arrXYZ)\n ax.set_xticks(np.arange(len(arrXYZ[0])+1)-.5, minor=True)\n ax.set_yticks(np.arange(len(arrXYZ)+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n # write values in pixel\n for pixX in range(0,matrix):\n for pixY in range(0,1):\n text = ax.text(pixX, pixY, \"{0:.1f}\".format(arrXYZ[pixY][pixX]),\n ha=\"center\", va=\"bottom\", color=\"w\")\n \n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, cmap=\"viridis\")\n cbar.ax.set_ylabel('CCE [%] $\\Delta$t='+str(timeValue)+'ns', rotation=-90, va=\"bottom\")\n # add impinging point\n x = matrix/2. - 0.5 #math.floor(matrix/2.)
\n y = 0 #matrix/2. - 0.5 #math.floor(matrix/2.)\n # print(x,y)\n ax.scatter(x,y,color='r')\n fig.tight_layout()\n fig.savefig(plotOutName)\n \n","sub_path":"python/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":30034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"394002363","text":"\nfrom api.models import Token\n\n\ndef permission_check(request, role=None):\n \"\"\"Permission checker.\"\"\"\n if 'token' not in request.headers:\n return False\n try:\n token_obj = Token.objects.get(token=request.headers['token'])\n user_obj = token_obj.user\n if user_obj and not role:\n return True\n user_groups = [str(group) for group in user_obj.groups.all()]\n # list membership avoids substring false positives\n # (e.g. role 'admin' matching a group named 'superadmin')\n if role in user_groups:\n return True\n else:\n return False\n except Exception:\n return False\n","sub_path":"oto_project/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"381275601","text":"import pytest\n\nfrom replay.models import Action\n\npytestmark = pytest.mark.django_db # pylint: disable=invalid-name\n\ndef test_action():\n action = Action.objects.create(\n name='Test',\n method='GET',\n path='/',\n data='{}',\n files='{}',\n status_code='200',\n content='',\n )\n assert str(action) == 'Test'\n","sub_path":"replay/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"239839806","text":"\"\"\"Add star_threshold to table server_config\n\nRevision ID: 7baf663f0bb2\nRevises: b58e52154ab0\nCreate Date: 2019-03-16 20:10:54.943038\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '7baf663f0bb2'\ndown_revision = 'b58e52154ab0'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('server_config', sa.Column('star_threshold', sa.SmallInteger(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('server_config', 'star_threshold')\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/7baf663f0bb2_add_star_threshold_to_table_server_.py","file_name":"7baf663f0bb2_add_star_threshold_to_table_server_.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"471362548","text":"import Descrpition_Methods\r\nfrom sknn.mlp import Layer,Regressor\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport numpy as np\r\nimport random\r\nimport pickle\r\n\r\n'''test with goals scored and conceded as well as matchday inputs.\r\ntry different network structures, added elos differences from last\r\n5 matchdays in description methods'''\r\n\r\nmatchday_tables = Descrpition_Methods.matchday_tables_1\r\nleague_data = Descrpition_Methods.league_data_1\r\n\r\ntrain_data,test_data,train_labels,test_labels = Descrpition_Methods.get_features_test(matchday_tables, league_data, 2015, 34)\r\n\r\n'''\r\ndata = list(train_data) + list(test_data)\r\nlabels = list(train_labels) + list(test_labels)\r\n\r\ntemp_list = []\r\nfor i in range(len(data)):\r\n temp_list.append([data[i],labels[i]])\r\n \r\ntemp_list = list(temp_list)\r\n\r\ntrain_data = []\r\ntrain_labels = []\r\ntest_data = []\r\ntest_labels = []\r\nfor _ in range(5814):\r\n temp = random.choice(range(len(temp_list)))\r\n train_data.append(temp_list[temp][0])\r\n train_labels.append(temp_list[temp][1])\r\n temp_list.pop(temp)\r\n\r\nfor i in temp_list:\r\n test_data.append(i[0])\r\n test_labels.append(i[1])\r\n'''\r\ndef scale_data_test(train_data,test_data):\r\n scaler = MinMaxScaler()\r\n scaler.fit(train_data)\r\n train_data = scaler.transform(train_data)\r\n test_data = scaler.transform(test_data)\r\n return train_data,test_data\r\n\r\ndef evaluation(results,labels):\r\n summation = 0\r\n for i in range(len(results)):\r\n if list(results[i]) == list(labels[i]):\r\n summation += 1\r\n return summation/len(results)\r\n\r\ndef transform(data):\r\n output = np.zeros((len(data),len(data[0])))\r\n for i in range(len(data)):\r\n maximum = np.argmax(data[i])\r\n output[i][maximum] = 1\r\n return output\r\n\r\ntrain_data,test_data = scale_data_test(train_data, test_data)\r\n\r\ntrain_data = np.array(train_data)\r\ntest_data = np.array(test_data)\r\ntrain_labels = np.array(train_labels)\r\n\r\nnn_1 = Regressor(\r\n layers=[\r\n Layer(\"Rectifier\", units=75),\r\n Layer(\"Rectifier\", units=75),\r\n Layer(\"Rectifier\", units=75),\r\n Layer(\"Rectifier\", units=75),\r\n Layer(\"Softmax\")],\r\n learning_rate=0.001,\r\n n_iter=25,\r\n regularize='L2')\r\n\r\nnn_1.fit(train_data,train_labels)\r\n\r\nresults = nn_1.predict(test_data)\r\nresults_old = np.around(results,3)\r\n\r\n#with open('last.pickle','rb') as last:\r\n# old = pickle.load(last)\r\n \r\nwith open('last.pickle','wb') as last:\r\n pickle.dump(results,last)\r\n\r\n#mse = np.mean(np.abs(results-old))\r\n#maximum = np.max(np.abs(results-old))\r\n#standard = np.std(np.abs(results-old))\r\n\r\nresults = transform(results)\r\n\r\nprint(evaluation(results, test_labels), \"richtig\")\r\n\r\n#print(mse, \"differenz zu letzter vorhersage\")\r\n\r\n#print(standard, \"standardabweichung letzte vorhersage\")\r\n\r\n#print(maximum, \"maximaler unterschied\")\r\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"540881824","text":"#!/usr/bin/env 
python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"Register factory.\n\"\"\"\n\nimport importlib\nimport os\nfrom os.path import dirname\nimport sys\nimport glob\nfrom absl import logging\n# logging.set_verbosity(logging.DEBUG)\ntry:\n import xt\nexcept ModuleNotFoundError as err:\n xt = None\n\n\nclass Register(object):\n \"\"\"Register module\"\"\"\n\n def __init__(self, name):\n self._dict = {}\n self._name = name\n\n def __setitem__(self, key, value):\n if not callable(value):\n raise Exception(\"Value of a Registry must be callable.\")\n if key is None:\n key = value.__name__\n if key in self._dict:\n logging.warning(\"Key:{} already in registry {}.\".format(key, self._name))\n self._dict[key] = value\n\n def register(self, param):\n \"\"\"Decorator to register a function or class.\"\"\"\n\n def decorator(key, value):\n self[key] = value\n return value\n\n if callable(param):\n # @reg.register\n return decorator(None, param)\n # @reg.register('alias')\n return lambda x: decorator(param, x)\n\n def __getitem__(self, key):\n try:\n return self._dict[key]\n except Exception as e:\n logging.error(\"module {} not found: {}\".format(key, e))\n raise e\n\n def __contains__(self, key):\n return key in self._dict\n\n def keys(self):\n \"\"\"return all the keys contained\"\"\"\n return self._dict.keys()\n\n\nclass Registers(object): # pylint: disable=invalid-name, too-few-public-methods\n \"\"\"All module registers within it.\"\"\"\n\n def __init__(self):\n raise RuntimeError(\"Registries is not intended to be instantiated\")\n\n agent = Register(\"agent\")\n model = Register(\"model\")\n algorithm = Register(\"algorithm\")\n env = Register(\"env\")\n comm = Register(\"comm\")\n\n\ndef path_to_module_format(py_path):\n \"\"\"Transform a python/file/path to module format match to the importlib.\"\"\"\n # return py_path.replace(\"/\", \".\").rstrip(\".py\")\n return os.path.splitext(py_path)[0].replace(\"/\", \".\")\n\n\ndef _keep_patten(file_name):\n return False if \"__init__\" in file_name or \"_conf\" in file_name else True\n\n\ndef _catch_subdir_modules(module_path):\n # print(\"pwd: \", os.getcwd())\n if xt:\n work_path = os.path.join(dirname(dirname(xt.__file__)), module_path)\n else:\n work_path = module_path\n target_file = glob.glob(\"{}/*/*.py\".format(work_path))\n model_path_len = len(work_path)\n target_file_clip = [item[model_path_len:] for item in target_file]\n 
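# filter out __init__ and *_conf files so they are not registered\n 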
used_file = [item for item in target_file_clip if _keep_patten(item)]\n\n return [path_to_module_format(_item) for _item in used_file]\n\n\ndef _catch_peer_modules(module_path):\n if xt:\n work_path = os.path.join(dirname(dirname(xt.__file__)), module_path)\n else:\n work_path = module_path\n\n target_file = glob.glob(\"{}/*.py\".format(work_path))\n model_path_len = len(work_path)\n target_file_clip = [item[model_path_len:] for item in target_file]\n\n used_file = [item for item in target_file_clip if _keep_patten(item)]\n\n return [path_to_module_format(_item) for _item in used_file]\n\n\nMODEL_MODULES = _catch_subdir_modules(\"xt/model\")\nALG_MODULES = _catch_subdir_modules(\"xt/algorithm\")\nAGENT_MODULES = _catch_subdir_modules(\"xt/agent\")\nENV_MODULES = _catch_subdir_modules(\"xt/environment\")\n\nCOMM_MODULES = _catch_peer_modules(\"xt/framework/comm\")\n\nALL_MODULES = [\n (\"xt.model\", MODEL_MODULES),\n (\"xt.algorithm\", ALG_MODULES),\n (\"xt.agent\", AGENT_MODULES),\n (\"xt.environment\", ENV_MODULES),\n (\"xt.framework.comm\", COMM_MODULES),\n]\n\n\ndef _handle_errors(errors):\n \"\"\"Log out and possibly re-raise errors during import.\"\"\"\n if not errors:\n return\n for name, err in errors:\n logging.warning(\"Module {} import failed: {}\".format(name, err))\n # logging.fatal(\"Please check these modules.\") # could shutdown\n\n\ndef get_custom_modules(config=None):\n \"\"\"Add custom modules to system_modules\n # import xt.agent.custom_agent/*.py, the config will be follows\n {\"custom_modules\": [xt.agent.custom_agent]}\n \"\"\"\n custom_module_path = list()\n\n if config is not None and \"custom_modules\" in config:\n custom_modules = config[\"custom_modules\"]\n if not isinstance(custom_modules, list):\n custom_modules = [custom_modules]\n custom_module_path.extend(\n [(\"\", [path_to_module_format(module)]) for module in custom_modules]\n )\n return custom_module_path\n\n\ndef import_all_modules_for_register(custom_module_config=None):\n \"\"\"Import all modules for register.\"\"\"\n # add `pwd` into path.\n current_work_dir = os.getcwd()\n if current_work_dir not in sys.path:\n sys.path.append(current_work_dir)\n\n all_modules = ALL_MODULES\n all_modules.extend(get_custom_modules(custom_module_config))\n\n logging.debug(\"import all_modules: {}\".format(all_modules))\n errors = []\n\n for base_dir, modules in all_modules:\n for name in modules:\n try:\n if base_dir != \"\":\n full_name = base_dir + name\n else:\n full_name = name\n importlib.import_module(full_name)\n logging.debug(\"{} loaded.\".format(full_name))\n except ImportError as error:\n errors.append((name, error))\n _handle_errors(errors)\n","sub_path":"built-in/TensorFlow/Research/reinforcement-learning/DQN_for_TensorFlow/rl/xt/framework/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":6565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121590832","text":"from datetime import datetime\n\n\nclass Solution:\n def daysBetweenDates(self, date1: str, date2: str) -> int:\n d1 = list(map(int, date1.split('-')))\n d2 = list(map(int, date2.split('-')))\n dt1 = datetime(d1[0], d1[1], d1[2])\n dt2 = datetime(d2[0], d2[1], d2[2])\n dif = dt1 - dt2\n return abs(dif.days)\n","sub_path":"leetcode/wc177/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"60921582","text":"####coding: utf-8\n'''\nCreated on 12 Mar 
2018\n\n@author: lbtanh\n'''\nfrom os.path import dirname, join, realpath\nimport sys\nimport argparse\n\nfrom make_dataset import ROOT_DIR, seriallize_face_encodings\n# from __future__ import division\nimport cv2\nimport glob, pickle, os\nimport numpy as np\nimport face_recognition # needed by test_classifier below\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline, Pipeline\n\nENCODINGS_PER_INDIVIDUAL_THRESHOLD = 20\n \n \ndef test_classifier(): \n # ////////// load model //////////////////\n clf = pickle.load(open('SVM_linear.model', 'rb'))\n # ////// run predict on image //////////////\n # resize_image is expected to come from a project helper module (not defined here)\n img = resize_image('D:/Dataset/7_face/5380/5380_normal_1.jpg')\n image = np.array(img)\n face_encoding = face_recognition.face_encodings(image)\n if len(face_encoding) == 0:\n print(\"no face detected on this one\")\n else:\n predict_image = np.array(face_encoding[0])\n result = clf.predict(predict_image.reshape(1, -1))\n\ndef create_classifier(X, y, clf_type):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n print(X_train.shape)\n #Set the classifier as a support vector machine with polynomial kernel\n # clf = SVC(kernel=\"poly\", degree=3, coef0=1, C=5)\n # ////////// training and save model//////////////// \n shuffle_index = np.random.permutation(len(X_train))\n X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]\n print('Creating classifier: ' + clf_type)\n \n if clf_type == \"l_svm\":\n# clf = Pipeline(((\"scaler\", StandardScaler()),(\"linear_svc\", LinearSVC(C=1, loss=\"hinge\")),))\n clf = SVC(kernel='linear', probability=True, tol=1e-3)#, verbose = True)\n model_path = 'SVM_linear.model'\n clf.fit(X_train, y_train)\n \n elif clf_type == 'nl_svm':\n model_path = 'non_linear.model'\n pipe_svc = make_pipeline(StandardScaler(),SVC(random_state=1))\n param_range = [0.0001, 0.001, 0.01, 0.1,1.0, 10.0, 100.0, 1000.0]\n param_grid = [{'svc__C': param_range,'svc__kernel': ['linear']},{'svc__C': param_range,'svc__gamma': param_range,'svc__kernel': ['rbf']}]\n clf = GridSearchCV(estimator=pipe_svc,param_grid=param_grid,scoring='accuracy',cv=5,n_jobs=-1)\n clf = clf.fit(X_train, y_train)\n print(clf.best_score_)\n\n print(clf.best_params_) \n \n elif clf_type == \"nn\":\n clf = MLPClassifier(solver='lbfgs', alpha=1e-2,\n hidden_layer_sizes=(512, 100), random_state=1)\n model_path = 'nn.model'\n clf.fit(X_train, y_train)\n print(clf)\n \n \n pickle.dump(clf, open(model_path, 'wb'))\n pred_score = clf.score(X_test, y_test)\n print(\"Prediction score: \", pred_score)\n\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--is_train', required=False, default=1, \n help='train model')\n parser.add_argument('--type_model', required=False, default='nl_svm', \n help='')\n return parser.parse_args(argv) \n\ndef main(args):\n if int(args.is_train):\n if args.type_model in [\"nn\", \"l_svm\", \"nl_svm\"]:\n # X, y = [pickle.load(open(data+'.pickle', 'rb')) for data in ['X', 'y']]\n face_embeddings_path = join(ROOT_DIR, 'models/embeddings/face_encodings_2.pickle')\n X, Y = seriallize_face_encodings(face_embeddings_path, 10)\n print(len(X), len(Y))\n create_classifier(X, Y, args.type_model)\n else:\n print(\"Unexpected type of classifier\")\n\n \nif __name__=='__main__':\n main(parse_arguments(sys.argv[1:]))\n\n 
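# example invocation (sketch, using the flags defined in parse_arguments):\n# python face_classifier_train.py --is_train 1 --type_model l_svm\n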
","sub_path":"faceidsys_tools/lib/face_classifier_train.py","file_name":"face_classifier_train.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391618231","text":"# Last edited May 31st, 2017\r\n\r\nimport numpy as np\r\n\r\ndef repeated_touches(touches, repeated_touch_threshold):\r\n \"\"\"This function filters out touches on the same electrode channel that\r\n occur within a certain given amount of time. Usually, repeated touches are\r\n caused by a mouse repositioning its foot on the side ledge and therefore\r\n should not count as separate foot faults.\r\n\r\n Input arguments\r\n touches: a 2D numpy array containing the individual touch data\r\n repeated_touch_threshold: the threshold (in seconds) that determines how\r\n close in time repeated touches onto the same channel have to be in order to\r\n be filtered out\r\n\r\n Output\r\n no_rt: a 2D numpy array containing the filtered individual touch data\r\n deleted: a 2D numpy array containing the deleted touch data\"\"\"\r\n\r\n # Get all channels in which more than 1 touch occured\r\n unique, counts = np.unique(touches[:,1], return_counts=True)\r\n repeated_touched_channels = [unique[index] for index, count in enumerate(counts) if count > 1]\r\n\r\n # Get a boolean mask of rows of which the channel is in repeated_touched_channels\r\n mask = np.in1d(touches[:,1].ravel(), repeated_touched_channels).reshape(touches[:,1].shape)\r\n\r\n # rt is equal to all the rows in touches for which the mask is True\r\n rt = touches[mask]\r\n rt = rt[np.argsort(rt[:,2])][::-1] # order so that first touch comes last\r\n\r\n # no_rt is equal to all the rows in touches for which the mask is False\r\n no_rt = touches[~mask]\r\n\r\n # Keep an array of all touches that are deleted\r\n deleted = np.zeros([0,4])\r\n\r\n # For each channel, remove the shortest touch of pairs that follow each\r\n # other within the repeated_touch_threshold\r\n for ch in repeated_touched_channels:\r\n\r\n subarray = rt[np.where(rt[:,1] == ch)]\r\n stop = False\r\n\r\n while len(subarray) > 1 and stop == False:\r\n\r\n # While there are still multiple touches left, compare the start\r\n # time of each touch against the next one. If a touch is to be\r\n # deleted, restart the loop. 
If no touch is deleted, exit the while\r\n # loop\r\n for row in range(len(subarray)):\r\n\r\n # If the difference between start times of both touches is\r\n # smaller than or equal to the threshold...\r\n if subarray[row,2] - subarray[row+1,2] <= repeated_touch_threshold:\r\n # And 1) the duration of the first touch is smaller than\r\n # that of the second one, or 2) it is the start channel (47):\r\n if subarray[row,3] < subarray[row+1,3] or subarray[row,1] == 47:\r\n # Delete the first (=shorter) touch\r\n deleted = np.vstack([deleted, subarray[row]])\r\n subarray = np.delete(subarray,row,0)\r\n # If there is only 1 touch left:\r\n if len(subarray) < 2:\r\n no_rt = np.vstack([no_rt, subarray]) # keep that touch\r\n # Restart the for loop with the updated subarray\r\n break\r\n # Else 1) if the duration of the first touch is greater than\r\n # that of the second one, or 2) it is the finish channel (0)\r\n elif subarray[row,3] >= subarray[row+1,3] or subarray[row,1] == 0:\r\n # Delete the second (=shorter) touch\r\n deleted = np.vstack([deleted, subarray[row+1]])\r\n subarray = np.delete(subarray,row+1,0)\r\n # If there is only 1 touch left:\r\n if np.shape(subarray)[0] < 2:\r\n no_rt = np.vstack([no_rt, subarray]) # keep that touch\r\n # Restart the for loop with the updated subarray\r\n break\r\n # If no multiple touches remain and, as a result, the for loop\r\n # continued until the last pair of rows:\r\n elif row == len(subarray) - 2:\r\n # Add the remaining touches to the no_rt array\r\n no_rt = np.vstack([no_rt, subarray])\r\n # Break the while loop and go to the next channel (in the\r\n # outer for loop)\r\n stop = True\r\n break\r\n\r\n # Restore the initial order of touches\r\n no_rt = no_rt[np.argsort(no_rt[:,2])]\r\n deleted = deleted[np.argsort(deleted[:,2])]\r\n\r\n return no_rt, deleted\r\n\r\ndef double_electrode(touches):\r\n \"\"\"This function filters out touches that occurred shortly after each other\r\n on two adjacent electrode channels, which indicates that a mouse put its\r\n paw onto both electrodes (almost) simultaneously. In most cases, this is a\r\n single touch and so it should not count as two foot faults. The touch to\r\n be deleted is the one closest to the narrow end of the beam.\r\n\r\n Input argument\r\n touches: a 2D numpy array containing the individual touch data\r\n\r\n Output\r\n no_de: a 2D numpy array containing the filtered individual touch data\r\n deleted: a 2D numpy array containing the deleted touch data\"\"\"\r\n\r\n # Keep an array of all touches that are deleted\r\n deleted = np.zeros([0,4])\r\n\r\n while True:\r\n\r\n touch_deleted = False\r\n # For each touch, obtain an array of other touches that started within\r\n # 150 ms after the start of that touch\r\n for row in range(len(touches)):\r\n # Select all touches after the current one\r\n subarray = touches[row+1:]\r\n # Keep all touches that started at or before 150 ms after current touch\r\n within_time = subarray[np.where(subarray[:,2] <= touches[row][2] + 0.150)]\r\n\r\n # For each touch in the array, check whether or not the absolute\r\n # difference in channel is 2 (e.g. 6 and 4, or 6 and 8). 
If true,\r\n # these electrodes are adjacent along the length of the beam.\r\n # The only exceptions are the start and finish electrodes.\r\n for row2 in range(len(within_time)): # only loops if within_time is not null\r\n ch_1 = touches[row][1]\r\n ch_2 = within_time[row2][1]\r\n if abs(ch_1 - ch_2) == 2 and (ch_1 and ch_2 not in [0,47]):\r\n\r\n # If so, delete the touch(es) corresponding to the lowest\r\n # channel number (e.g the one closest to the finish)\r\n if ch_1 < ch_2:\r\n deleted = np.vstack([deleted, touches[row]])\r\n touches = np.delete(touches, row, 0)\r\n touch_deleted = True\r\n break\r\n else:\r\n deleted = np.vstack([deleted, within_time[row2]])\r\n touches = np.delete(touches, row+row2+1, 0)\r\n touch_deleted = True\r\n break\r\n\r\n # If a touch has been deleted during the inner loop,\r\n # break from the outer loop as well and restart with updated array\r\n if touch_deleted == True:\r\n break\r\n\r\n # 4. Keep looping until no touch has been deleted anymore\r\n if touch_deleted == False:\r\n no_de = touches\r\n break\r\n\r\n return no_de, deleted\r\n","sub_path":"filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"374656068","text":"\"\"\"set default value of new answer notif\n\nRevision ID: c66c4c47cbac\nRevises: c817ad440ab6\nCreate Date: 2017-11-29 15:39:02.412490\n\n\"\"\"\nimport os\n\nfrom alembic import op\nimport sqlalchemy as sa\n\nschema_name = os.getenv('SCHEMA_NAME')\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c66c4c47cbac'\ndown_revision = 'c817ad440ab6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n with op.batch_alter_table(\"user\", schema=schema_name) as batch_op:\n batch_op.alter_column('is_enabled_new_answer_notif', server_default='False')\n\n\ndef downgrade():\n with op.batch_alter_table(\"user\", schema=schema_name) as batch_op:\n batch_op.alter_column('is_enabled_new_answer_notif', server_default='True')\n","sub_path":"alembic/versions/c66c4c47cbac_set_default_value_of_new_answer_notif.py","file_name":"c66c4c47cbac_set_default_value_of_new_answer_notif.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"49496484","text":"\"\"\"\n\nSubarray with Maximum Sum#41\nGiven an array of integers array, find the subarray of contiguous elements in array that has the maximum sum. 
Return the sum of the subarray.\n\nFor example:\n\nInput: [-4, 0, 5, -2, 3, 3, -1]\nOutput: 9\nExplanation: The subarray with maximum sum is [5, -2, 3, 3]\n\nInput: [-1, -2, -3, -4, -5, -6]\nOutput: -1\nExplanation: The subarray must be of at least 1 element, so [-1] is the best we can get.\n\nInput:\narray elements separated by space.\n\n-4 0 5 -2 3 3 -1\n\nOutput:\nThe maximum sum.\n\n9\n\n#Apple #Microsoft #Google #Amazon #LinkedIn #Facebook #Uber #Array #DynamicProgramming #Recursion\n\n\n\"\"\"\n\ndef subarrayWithMaximumSum(array):\n \"\"\"\n Args:\n {int[]} array\n Returns:\n {int} The maximum sum.\n \"\"\"\n return get_max(0, len(array) - 1, array)\n\ndef get_max(head, tail, array):\n if not array:\n return 0\n\n if head == tail:\n return array[head]\n\n middle = (head + tail) // 2\n max_from_left = get_max(head, middle, array)\n max_from_right = get_max(middle + 1, tail, array)\n\n max_left = 0\n current_sum = 0\n index = middle - 1\n while index >= head:\n current_sum += array[index]\n max_left = max(current_sum, max_left)\n index -= 1\n\n max_right = 0\n current_sum = 0\n index = middle + 1\n while index <= tail:\n current_sum += array[index]\n max_right = max(current_sum, max_right)\n index += 1\n\n max_cross = max_left + max_right + array[middle]\n\n return max(max_from_left, max_from_right, max_cross)\n\n\n\n","sub_path":"pkg/subarray_with_maximum_sum.py","file_name":"subarray_with_maximum_sum.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"75869734","text":"from math import sqrt\nfrom random import shuffle\n\n\ndef power(x, y, p):\n\n res = 1 # Initialize result\n\n x = x % p # Perform x (mod p)\n\n while (y > 0):\n # If y is odd, multiply x with result\n if (y & 1):\n res = (res * x) % p\n\n # y must be even now\n y = y >> 1 # y = y/2\n x = (x * x) % p\n\n return res\n\n\ndef findPrimeFactors(n):\n s = set()\n\n # Print the number of 2s that divide n\n while (n % 2 == 0):\n s.add(2)\n n = n // 2\n\n # n must be odd at this po. So we can\n # skip one element (Note i = i +2)\n for i in range(3, int(sqrt(n)), 2):\n\n # While i divides n, print i and divide n\n while (n % i == 0):\n\n s.add(i)\n n = n // i\n\n # This condition is to handle the case\n # when n is a prime number greater than 2\n if (n > 2):\n s.add(n)\n return s\n\n\ndef findRandomPrimitive(n): # Function to find a random primitive root of n\n # Calculates the value of the Euler Totient of n (phi). 
This is n-1 since n is prime.\n phi = n - 1\n\n # Find prime factors of phi and store in a set\n s = findPrimeFactors(phi)\n\n # Check for every number from 2 to phi in a random order (by first shuffling the )\n candidates = list(range(2, phi + 1))\n shuffle(candidates)\n for r in candidates:\n\n # Iterate through all prime factors of phi and check if we found a power with value 1\n flag = False\n for it in s:\n\n # Check if r^((phi)/primefactors)\n # mod n is 1 or not\n if (power(r, phi // it, n) == 1):\n\n flag = True\n break\n\n # If there was no power with value 1.\n if (flag == False):\n return r\n\n # If no primitive root found\n return -1\n\n\nprint(findRandomPrimitive(107))\n","sub_path":"src/Diffie-Hellman Key Exchange/PrimitiveRoot.py","file_name":"PrimitiveRoot.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"564784220","text":"import sys, os\nimport pandas as pd\nfrom preprocessor import Preprocessor\nfrom dataSet import DataSet\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/rna\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/hybrid\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/knn\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/svm\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/rf\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/naive_bayes\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/lstm\")\nsys.path.append(os.path.dirname(os.path.realpath(__file__)) + \"/dt\")\n\nfrom cross_validation import CrossValidation\nfrom preprocessor import Preprocessor\nfrom dataSet import DataSet\nfrom knn_classifier import KnnClassifier\nfrom svm_classifier import SvmClassifier\nfrom naive_bayes_classifier import NaiveBayesClassifier\nfrom rf_classifier import RfClassifier\nfrom rna_classifier import RnaClassifier\nfrom lstm_classifier import LstmClassifier\nfrom hybrid_classifier import HybridClassifier\nfrom decision_tree_classifier import DecisionTreeClassifier\nfrom rna_module import RnaModule\nfrom knn_module import KnnModule\nfrom svm_module import SvmModule\nfrom rf_module import RfModule\nfrom decision_tree_module import DecisionTreeModule\nfrom lstm_module import LstmModule\nfrom naive_bayes_module import NaiveBayesModule\nfrom evaluate_module import EvaluateModule\n\n\n#CONFIGURACAO DO KNN\nknn = KnnModule()\nknn.setKNeighbors(1)\nknn_classifier = KnnClassifier()\nknn_classifier.setKnn(knn)\n\n#CONFIGURACAO DO SVM\nsvm = SvmModule()\nsvm_classifier = SvmClassifier()\nsvm_classifier.setSvm(svm)\n\n#CONFIGURACAO DO RF\nrf = RfModule()\nrf_classifier = RfClassifier()\nrf_classifier.setRf(rf)\n\n#CONFIGURACAO DO RF\ndt = DecisionTreeModule()\ndt_classifier = DecisionTreeClassifier()\ndt_classifier.setDecisionTree(dt)\n\n#CONFIGURACAO DA NAIVEBAYES\nnaive_bayes = NaiveBayesModule()\nnaive_bayes_classifier = NaiveBayesClassifier()\nnaive_bayes_classifier.setNaiveBayes(naive_bayes)\n\n#CONFIGURACAO DO LSTM\nlstm = LstmModule()\nlstm.setInputLength(20)\nlstm.setNumberExamples(1000)\nlstm_classifier = LstmClassifier()\nlstm_classifier.setLstm(lstm)\n\n\n#CONFIGURACAO DA REDE NEURAL \nrna = 
RnaModule()\nrna.setNumberNeuronsImputLayer(41)\nrna.setActivationFunctionImputLayer(\"tanh\")\nrna.setImputDimNeurons(41)\nrna.setNumberNeuronsHiddenLayer(41)\nrna.setActivationFunctionHiddenLayer(\"tanh\")\nrna.setNumberNeuronsOutputLayer(1)\nrna.setActivationFunctionOutputLayer(\"tanh\")\nrna_classifier = RnaClassifier()\nrna_classifier.setRna(rna)\n\n\n#METODO HIBRIDO \nhybrid_classifier = HybridClassifier()\nhybrid_classifier.setPercentilFaixaSup(25)\nhybrid_classifier.setPercentilFaixaInf(100)\nhybrid_classifier.setRna(rna)\nhybrid_classifier.setKnn(knn)\n\n\n#PREPROCESSADOR PARA ATRIBUTOS CATEGORICOS\npreprocessor = Preprocessor()\npreprocessor.setColumnsCategory(['protocol_type','service','flag'])\n#preprocessor.setColumnsCategory(['service','flag'])\n\nevaluate = EvaluateModule()\n\ncross = CrossValidation()\n#DEFINIR A ITERACAO QUE O CROSS VALIDATION ESTA\ncross.setIteration(1)\ncross.setK(10)\ncross.setPreprocessor(preprocessor)\n#cross.setFilePath(\"bases/sub_bases_20_nslkdd/\")\n#icross.setFilePath(\"bases/sub_bases_train+_nslkdd/\")\n#cross.setFilePath(\"bases/sub_bases_nslkdd_tcp_attribute/\")\n#cross.setFilePath(\"bases/sub_bases_nslkdd_12attribute/\")\n#cross.setFilePath(\"bases/sub_bases_nslkdd_20attribute/\")\n#cross.setFilePath(\"bases/sub_bases_SmallTrainingSet/\")\n#cross.setFilePath(\"bases/sub_bases_small_training_set1000/\")\n\n#cross.setResultPath(\"results/faixa_hibrido/\")\n#cross.setFilePath(\"../../Bases/MachineLearningCVE/DoS/\")\n#cross.setFilePath(\"../../Bases/NSL-KDD/bases/attribute_selection/sub_bases_iris/\")\n#cross.setFilePath(\"../../Bases/NSL-KDD/bases/attribute_selection/sub_bases_nslkdd_20attribute/\")\ncross.setFilePath(\"../../Bases/NSL-KDD/bases/attribute_selection/sub_bases_nslkdd_complete/\")\n#cross.setResultPath(\"../results_ann-knn_cicids2017_ddos/completa/svm/\")\n#cross.setResultPath(\"../results_ann-knn_NSL-KDD/20att/dt/\")\n#cross.setResultPath(\"../results_iris/completa/naive_bayes/\")\n#cross.setResultPath(\"../results_iris/completa/dt/\")\ncross.setResultPath(\"../NOVOSEXPERIMENTOS/NSL-KDD/completa/hybP25N1-1/\")\n#cross.setClassifier(rna_classifier)\n#cross.setClassifier(knn_classifier)\n#cross.setClassifier(svm_classifier)\n#cross.setClassifier(rf_classifier)\n#cross.setClassifier(naive_bayes_classifier)\n#cross.setClassifier(lstm_classifier)\n#cross.setClassifier(dt_classifier)\n#cross.setClassifier(clustered_knn_classifier)\n#cross.setClassifier(clustered_density_knn_classifier)\ncross.setClassifier(hybrid_classifier)\n\ncross.setClass_name('classe')\n#cross.setClass_name(' Label')\n\ncross.setEvaluateModule(evaluate)\ncross.run()\n","sub_path":"1_hybP25N1_nslkddcomplete.py","file_name":"1_hybP25N1_nslkddcomplete.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"66729544","text":"from PIL import Image\nimport os\nimport os.path\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport numpy as np\n\n\nlabelfiles = {\n 'train': [\n '1_train.txt', #texture_related\n '2_train.txt', #fabric_related\n '3_train.txt', #shape_related\n '4_train.txt', #part_related\n '5_train.txt' #style_related\n ],\n 'val': [\n '1_val.txt',\n '2_val.txt',\n '3_val.txt',\n '4_val.txt',\n '5_val.txt'\n ],\n 'test': [\n '1_test.txt',\n '2_test.txt',\n '3_test.txt',\n '4_test.txt',\n '5_test.txt'\n ]\n}\n\nfilenames = {\n 'train': [\n 'train_att1.txt',\n 'train_att2.txt',\n 'train_att3.txt',\n 'train_att4.txt',\n 'train_att5.txt'\n ],\n 
'val': [\n 'val_att1.txt',\n 'val_att2.txt',\n 'val_att3.txt',\n 'val_att4.txt',\n 'val_att5.txt'\n ],\n 'test': [\n 'test_att1.txt',\n 'test_att2.txt',\n 'test_att3.txt',\n 'test_att4.txt',\n 'test_att5.txt'\n ]\n}\n\n\ndef default_image_loader(path):\n return Image.open(path).convert('RGB')\n\nclass TripletImageLoader(torch.utils.data.Dataset):\n def __init__(self, root, conditions, split, n_triplets, transform=None,\n loader=default_image_loader):\n \"\"\" filenames_filename: A text file with each line containing the path to an image e.g.,\n images/class1/sample.jpg\n triplets_file_name: A text file with each line containing three integers,\n where integer i refers to the i-th image in the filenames file.\n For a line of intergers 'a b c', a triplet is defined such that image a is more\n similar to image c than it is to image b, e.g.,\n 0 2017 42 \"\"\"\n self.root = root\n\n #모든 파일 경로가 적혀있는 txt. index로 접근가능함\n\n\n #fnames엔 split별 .txt파일 이름 5개가 들어감.\n triplets = []\n if split == 'train':\n fnames = filenames['train'] #20만개\n lnames = labelfiles['train']\n elif split == 'val':\n fnames = filenames['val'] #2만개\n lnames = labelfiles['val']\n else:\n fnames = filenames['test'] #4만개\n lnames = labelfiles['test']\n\n ''' \n train / val/ test별로 저 해당 파일의 index로 접근해서 가져오는 역할을 함\n '''\n attribute_dict = dict() # conditions만큼 생성해서, 각 condition의 attribute별로 list로 파일 path를 저장했음\n\n for condition in conditions: # a dictionary 초기화\n attribute_dict[condition] = []\n\n for condition in conditions: # condition마다, 파일을 읽어서 attribute별로 list를 만들어서, a condition 위치에 추가함\n with open(os.path.join(self.root, 'DeepFashion', lnames[condition]), 'r') as f: # 파일의 라인별로 읽어옴\n datas = f.readlines()\n for data in datas:\n sub = [] # 임시 저장소\n filepaths = data.split()[1:] # condition의 한 attribute의 file path들\n for filepath in filepaths:\n sub.append(filepath.replace(\"\\n\", \"\"))\n attribute_dict[condition].append(sub)\n\n for condition in conditions: #0, 1, 2, 3, 4\n '''\n 해당 train/val/test별로 적혀있는 모든 txt를 돌면서 0, 1, 2번째, condition을 triplets에 append함.\n '''\n with open(os.path.join(self.root, 'DeepFashion', fnames[condition]), 'r') as f:\n datas = f.readlines()\n for anchor in datas:\n anchor = anchor.replace(\"\\n\", \"\")\n\n while True:\n random_attribute_far = np.random.randint(len(attribute_dict[condition]))\n random_imgIdx_far = np.random.randint(len(attribute_dict[condition][random_attribute_far]))\n anchor_far = attribute_dict[condition][random_attribute_far][random_imgIdx_far]\n # 다른 attribute img\n\n if anchor != anchor_far and anchor not in attribute_dict[condition][random_attribute_far]:\n # anchor_far와 같지 않고, attribute도 달라야 break\n break\n\n while True:\n random_attribute_close = np.random.randint(len(attribute_dict[condition]))\n random_imgIdx_close = np.random.randint(len(attribute_dict[condition][random_attribute_close]))\n anchor_close = attribute_dict[condition][random_attribute_close][random_imgIdx_close]\n # 같은 attribute img\n\n if anchor != anchor_close and anchor in attribute_dict[condition][random_attribute_close]:\n # anchor_close와 같지 않고, attribute도 같아야 break\n break\n\n triplets.append((anchor, anchor_far, anchor_close, condition)) # anchor, far, close, .txts\n\n\n np.random.shuffle(triplets)\n\n self.triplets = triplets[:int(n_triplets * 1.0 * len(conditions) / 4)]\n self.transform = transform\n self.loader = loader\n\n def __getitem__(self, index):\n path1, path2, path3, c = self.triplets[index]\n '''\n *할일*\n index로 정의된 path1, 2, 3를 나는 이미 path로 정해두었기 때문에, 바로 self.loader로 열면 될 것 같다.\n '''\n if 
os.path.exists(os.path.join(self.root, path1)) and os.path.exists(os.path.join(self.root, path1)) and os.path.exists(os.path.join(self.root, path1)):\n img1 = self.loader(os.path.join(self.root, path1))\n img2 = self.loader(os.path.join(self.root, path2))\n img3 = self.loader(os.path.join(self.root, path3))\n if self.transform is not None:\n img1 = self.transform(img1)\n img2 = self.transform(img2)\n img3 = self.transform(img3)\n return img1, img2, img3, c\n else:\n return None\n\n def __len__(self):\n return len(self.triplets)\n","sub_path":"triplet_image_loader.py","file_name":"triplet_image_loader.py","file_ext":"py","file_size_in_byte":6391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"564924950","text":"# CNN for classifying crops.\r\n# Crop classification with pretrained algorithm.\r\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\r\nfrom keras.preprocessing import image\r\nfrom keras.models import Model\r\nfrom keras.layers import Dense, GlobalAveragePooling2D\r\nfrom keras import metrics\r\nfrom keras import losses\r\nfrom keras import backend as K\r\nimport os\r\n\r\nos.chdir(r\"C:/Users/Tim/Desktop/GIS/GISproject/\")\r\n\r\n# Import the pretrained model\r\nbase_model = InceptionV3(weights='imagenet', include_top=False)\r\n\r\nx = base_model.output\r\nx = GlobalAveragePooling2D()(x)\r\n\r\n# Add a fully connected layer.\r\nx = Dense(128, activation='relu')(x)\r\n\r\n# Add a classifying layer, 2 classes (Binary classification)\r\npredictions = Dense(1, activation='sigmoid')(x)\r\n\r\n# The model we'll train.\r\nmodel = Model(inputs=base_model.input, outputs=predictions)\r\n\r\n# Train only the top layer, freeze the weights of the others.\r\nfor layer in base_model.layers:\r\n layer.trainable = False\r\n \r\n# Compile the model\r\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=[metrics.hinge, 'accuracy', metrics.kullback_leibler_divergence])\r\n\r\n# Train the model on new data for a few epochs.\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\n# Create the generators for datasets.\r\ntrain_datagen = ImageDataGenerator(rescale = 1./255,\r\n shear_range = 0.2,\r\n zoom_range = 0.2,\r\n horizontal_flip = True,\r\n rotation_range = 20,\r\n width_shift_range = 0.2,\r\n height_shift_range = 0.2)\r\n\r\ntest_datagen = ImageDataGenerator(rescale = 1./255)\r\n\r\n# Get the image from the directories.\r\ntraining_set = train_datagen.flow_from_directory(r'temporal_stacks_summer_winter/train',\r\n target_size = (299, 299),\r\n batch_size = 32,\r\n class_mode = 'binary')\r\n\r\ntest_set = test_datagen.flow_from_directory(r'temporal_stacks_summer_winter/test',\r\n target_size = (299, 299),\r\n batch_size = 32,\r\n class_mode = 'binary')\r\n\r\nmodel.fit_generator(training_set, steps_per_epoch=10, epochs=10, \r\n validation_data=test_set, validation_steps=10)","sub_path":"binary_crop_classif_cnn.py","file_name":"binary_crop_classif_cnn.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"308952713","text":"from typing import List\n\nclass Solution:\n def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:\n N = len(graph)\n def solve(node):\n if node == N - 1: return [[N - 1]]\n ans = []\n for nei in graph[node]:\n for path in solve(nei):\n ans.append([node] + path)\n return ans\n return solve(0)\n\n\n\ns = Solution()\ngraph = 
[[4,3,1],[3,2,4],[3],[4],[]]\nprint(s.allPathsSourceTarget(graph))\n\n\n\n\n","sub_path":"python/dfs/797. 所有可能的路径.py","file_name":"797. 所有可能的路径.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"195653459","text":"#-*- coding: latin-1 -*-\n#Algorithme qui sert à trier les tickets de INCIDENTS\n#Extraire les champs importants\n#Utiliser les fonctionnalités Django pour créer la base de données\n\nfrom incident.models import Incident\nfrom django.conf import settings\nfrom datetime import datetime\nimport pytz\n\n\n\ndef recupFichier():\n \"\"\"\n Cette fonction récupère le fichier csv et renvoie un tableau à deux dimensions.\n La première correspond à un ticket et la deuxième correspond à chaque élément du ticket.\n \"\"\"\n #On ouvre le fichier contenant els tickets\n fic = open(settings.MEDIA_ROOT+'/tickets_incidents.csv', 'r', encoding='latin-1')\n t = fic.readlines() #on stock dans t toutes les lignes du fichier de tickets --> Chaque ligne EST un ticket\n fic.close() #On ferme le flux\n \n u = [] #u est comme t, à la différence que chaque colonne est un champ du ticket\n \n for i in range(len(t)): #On parcourt tous les tickets ----- len(t)\n u.append(t[i].split(\",\")) #On sépare les champs du ticket et on enlève les guillemets\n return u\n \n \ndef traitementTicketIncident(tabIncidents):\n #On instancie un objet Incident de la forme : \n \n #Incident(type_element, nom_element, etat_element, gravite, date_apparition, date_disparition_systeme)\n #Incident( 16, 17, 18, 27, 28, 30)\n timzeone = pytz.timezone('UTC')\n for p in range(len(tabIncidents)): #Tant qu'il y a des tickets\n \n #On récupère la date au champ 23\n texteDate = tabIncidents[p][28][1:-1]\n texteDateFin = tabIncidents[p][30][1:-1]\n \n \n d = datetime(int(texteDate[27:29])+2000,int(texteDate[24:26]),int(texteDate[21:23]),int(texteDate[30:32]),int(texteDate[33:35]),int(texteDate[36:38])) \n nom_element = tabIncidents[p][17][1:-1]\n \n if Incident.objects.filter(date_apparition__contains=d).filter(nom_element__contains=nom_element).count()==1:\n print(nom_element+\" \"+d.__str__()+\" deja present\")\n pass\n else:\n if(len(texteDateFin) == 22):\n dfin = datetime(int(texteDate[27:29])+2000,int(texteDate[24:26]),int(texteDate[21:23]),23,59,59) #Si la date de fin n'est pas renseignée, on prend le jour du début à 23h59m59s\n else :\n dfin = datetime(int(texteDateFin[28:30])+2000,int(texteDateFin[25:27]),int(texteDateFin[22:24]),int(texteDateFin[31:33]),int(texteDateFin[34:36]),int(texteDateFin[37:39]))\n \n i = Incident(type_element = tabIncidents[p][16][1:-1], nom_element = tabIncidents[p][17][1:-1], etat_element = tabIncidents[p][18][1:-1], gravite = tabIncidents[p][27][1:-1],date_apparition = d, date_disparition_systeme = dfin)\n i.save()\n \n \nliste = recupFichier()\ntraitementTicketIncident(liste)\n\n# Commande shell manage.py : exec(open('incident/chargement_conf_incidents.py').read())\n","sub_path":"LILAS/incident/chargement_conf_incidents.py","file_name":"chargement_conf_incidents.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"234243711","text":"from typing import List\r\n\r\nfrom models.economy_user import EconomyUser\r\nfrom models.enums import CollectionNames\r\nfrom database.mongo_client import get_mongo_client\r\n\r\n\r\ndef get_users_starting_with(search: str, database_name: str) -> List[EconomyUser]:\r\n mclient = 
get_mongo_client()\r\n coll = mclient[database_name][CollectionNames.users.value]\r\n\r\n users = []\r\n results = coll.find({\r\n 'name': {'$regex': f'^{search}', '$options' :'i'}\r\n })\r\n\r\n for result in results:\r\n user = EconomyUser(-1, database_name=database_name)\r\n user.get_data_from_dict(result)\r\n\r\n users.append(user)\r\n\r\n return users\r\n","sub_path":"core/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"446311988","text":"import os\n\ndef char_replace(instr):\n\tinstr = instr.replace(' ', '_')\n\tfor char in ['(', ')', '[', ']', ',', '/', \"'\", \":\", \";\"]:\n\t\tinstr = instr.replace(char, '')\n\treturn instr.lower()\n\ndef makedirs(output, book, localdir):\n\tbookdir = os.path.abspath(output + \"/\" + char_replace(book) + \"/\")\n\tif not os.path.exists(bookdir):\n\t\tos.makedirs(bookdir)\n\tif not os.path.exists(bookdir + \"/rules/\"):\n\t\tos.makedirs(bookdir + \"/rules/\")\n\tif not os.path.exists(bookdir + \"/\" + localdir + \"/\"):\n\t\tos.makedirs(bookdir + \"/\" + localdir + \"/\")\n","sub_path":"src/psrd/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"9471465","text":"from django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom crispy_forms.helper import FormHelper, Layout\nfrom crispy_forms.layout import Div, Row, Submit\n\nfrom .models import BudgetEntry, BudgetSetting\n\n\nclass BudgetSettingForm(forms.ModelForm):\n\n class Meta:\n model = BudgetSetting\n fields = ('reserve_model', 'model_amount', 'reserve_rounding_place')\n\n def __init__(self, *args, **kwargs):\n super(BudgetSettingForm, self).__init__(*args, **kwargs)\n\n self.model_name = self._meta.model._meta.verbose_name\n\n self.helper = FormHelper()\n\n self.helper.layout = Layout(\n Row(\n Div('reserve_model', css_class='col-md-6'),\n Div('model_amount', css_class='col-md-6')\n ),\n Row(\n Div('reserve_rounding_place', css_class='col-md-12'),\n ),\n )\n\n self.helper.add_input(Submit('update', _('Update')))\n\n\n\nclass BudgetForm(forms.ModelForm):\n\n class Meta:\n model = BudgetEntry\n fields = ('description', 'amount', 'cycle', 'tags')\n\n def __init__(self, *args, **kwargs):\n super(BudgetForm, self).__init__(*args, **kwargs)\n\n self.model_name = self._meta.model._meta.verbose_name\n\n self.helper = FormHelper()\n\n self.helper.layout = Layout(\n Row(\n Div('description', css_class='col-md-9'),\n Div('tags', css_class='col-md-3')\n ),\n Row(\n Div('amount', css_class='col-md-6'),\n Div('cycle', css_class='col-md-6')\n ),\n )\n\n if self.instance.pk:\n self.helper.add_input(Submit('update', _('Update')))\n else:\n self.helper.add_input(Submit('add', _('Add')))","sub_path":"homedesk/homedesk/apps/budget/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"240584896","text":"import re\nimport urllib.request\nimport os\nimport time\nfrom io import BytesIO\nfrom PIL import Image\n\n\ndef getHtml(url):\n try:\n page = urllib.request.urlopen(url)\n html = page.read()\n html = str(html)\n return html\n except urllib.error.HTTPError as e:\n print(e.reason)\n return False\n\n\ndef mkdir(path):\n path = path.strip()\n isExists = os.path.exists(path)\n if not isExists:\n os.makedirs(path)\n 
print(\"Created file directory at \" + path)\n return True\n else:\n print(path + ' created successfully.')\n return False\n\n\ndef saveImages(imglist, name):\n number = 1\n for imageURL in imglist:\n splitPath = imageURL.split('.')\n fTail = splitPath.pop()\n fName = splitPath.pop().split('/').pop()\n if len(fTail) > 3:\n fTail = 'jpg'\n fileName = name + \"/\" + fName + str(number) + \".\" + fTail\n try:\n # header = {'Host':'96xxnet1.com', \n # 'Connection':'keep-alive', \n # 'Cache-Control':'max-age=0', \n # 'Accept': 'text/html, */*; q=0.01', \n # 'X-Requested-With': 'XMLHttpRequest', \n # 'useragent' : 'User-Agent:Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36', \n # 'DNT':'1', \n # 'Referer': 'http://96xxnet1.com/', \n # 'Accept-Encoding': 'gzip, deflate, sdch', \n # 'Accept-Language': 'zh-CN,zh;q=0.8,ja;q=0.6'}\n rq = urllib.request.Request(imageURL)\n u = urllib.request.urlopen(rq)\n data = u.read()\n try:\n im = Image.open(BytesIO(data))\n except Exception as err:\n print(err)\n if im.size[0] < 300:\n return\n f = open(fileName, 'wb+')\n f.write(data)\n print('Saving a pic named ', fileName)\n f.close()\n except urllib.error.HTTPError as e:\n print(imageURL)\n print(e.reason)\n number += 1\n\n\n# get all the image url from html\ndef getAllImg(html):\n reg = r'src=\\\"(.{0,100}\\.jpg)\\\"'\n imglist = re.findall(reg, html)\n return imglist\n\n\ndef crawling(url, path):\n print(\"Crawling \" + url)\n srcHtml = getHtml(url)\n if not srcHtml:\n return False\n mkdir(path)\n imglist = getAllImg(srcHtml)\n # print(imglist)\n saveImages(imglist, path)\n return True\n\n\ndef getDomain(urlstr):\n reg = '(http://.+?)/.+'\n dmn = re.findall(reg, urlstr)\n if len(dmn) > 0:\n dmnstr = dmn[0][:-1]\n print(str(dmnstr))\n return dmnstr\n else:\n return urlstr\n\n\ndef crawling_by_category(url, subcategory):\n reg = 'testing urls\")\nclass IndexView(generic.ListView):\n template_name='movies/index.html'\n context_object_name='all_albums'\n #bydefault it will return with variable name of \"object_list\"\n def get_queryset(self):\n return Album2.objects.all()\nclass DetailView(generic.DetailView):\n model=Album2\n template_name='movies/detail.html'\nclass CreateAlbum(CreateView):\n model=Album2\n fields=['artist','album_title','genre','album_logo']\nclass AlbumUpdate(UpdateView):\n model=Album2\n fields = ['artist', 'album_title', 'genre', 'album_logo']\n\nclass AlbumDelete(DeleteView):\n model = Album2\n success_url=reverse_lazy('movies:index')","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"240935795","text":"phrase = \"Don't panic!\"\nplist = list(phrase)\nprint(phrase)\nprint(plist)\n\nfor i in range(3, 6, 2):\n plist[i] = plist[i + 2]\nplist = plist[1:-5]\n\nnew_phrase = ''.join(plist)\nprint(plist)\nprint(new_phrase)\n","sub_path":"list_on_tap2.py","file_name":"list_on_tap2.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"529790210","text":"import pandas as p\nimport smtplib as sm\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.image import MIMEImage\n\ndata = p.read_excel(\"giz_result.xlsx\")\n# print(type(data))\nemail_col = data.get(\"EMAIL\")\nname_col = data.get(\"NAME\")\ntheory_col = data.get(\"THEORY\")\npractical1_col 
= data.get(\"PRACTICAL1\")\npractical2_col = data.get(\"PRACTICAL2\")\npractical3_col = data.get(\"PRACTICAL3\")\npractical4_col = data.get(\"PRACTICAL4\")\npractical5_col = data.get(\"PRACTICAL5\")\ntotal_col = data.get(\"TOTAL\")\nremarks_col = data.get(\"REMARKS\")\nlist_of_emails = list(email_col)\nlist_of_name = list(name_col)\nlist_of_theory = list(theory_col)\nlist_of_practical1 = list(practical1_col)\nlist_of_practical2 = list(practical2_col)\nlist_of_practical3 = list(practical3_col)\nlist_of_practical4 = list(practical4_col)\nlist_of_practical5 = list(practical5_col)\nlist_of_total = list(total_col)\nlist_of_remarks = list(remarks_col)\nprint(list_of_emails)\nprint(list_of_name)\nprint(list_of_practical1)\nprint(list_of_practical2)\nprint(list_of_practical3)\nprint(list_of_practical4)\nprint(list_of_practical5)\nprint(list_of_total)\nprint(list_of_remarks)\n# print(email_col)\nplain1 = \"TESTING THIS SYSTEM\"\nt1 = t2 = t3 = t4 = t5 = t6 = 12\nt7 = 81\ntheory_score = 23\npractical1 = 13\npractical2 = 14\npractical3 = 7\npractical4 = 7\npractical5 = 14\ntotal_score = 89\nname = \"CHINEDU ERNEST\"\nrmks = \"EXCELLENT\"\nmsg = '''\nTHEORY: %s ,\nPRACTICAL 1: %s ,\nPRACTICAL 2: %s ,\nPRACTICAL 3: %s ,\nPRACTICAL 4: %s ,\nPRACTICAL 5: %s ,\nTOTAL:( %s )\n''' % (t1, t2, t3, t4, t5, t6, t7)\n\nfor que in range(len(list_of_name)):\n print(que)\n try:\n server = sm.SMTP(\"smtp.gmail.com\", 587)\n server.starttls()\n server.login(\"edutech4logic@gmail.com\", \"kdacthrpswbcckhi\")\n from_ = \"edutech4logic@gmail.com\"\n to_ = list_of_emails\n message = MIMEMultipart(\"alternative\")\n message['Subject'] = \"JUNIOR ROBOTICS EXAM SCORE\"\n message['from'] = \"edutech4logic@gmail.com\"\n # theory_score = 28\n html = f'''\n \n \n \n \n \n \n
\n \n

JUNIOR ROBOTICS EXAMINATION RESULT

\t\n

NAME: {list_of_name[que]}.

\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
EXAMINATION SCORES
SESSIONSSCORES
THEORY(30%){list_of_theory[que]}
PRACTICAL 1(14%){list_of_practical1[que]}
PRACTICAL 2(14%){list_of_practical2[que]}
PRACTICAL 3(14%){list_of_practical3[que]}
PRACTICAL 4(14%){list_of_practical4[que]}
PRACTICAL 5(14%){list_of_practical5[que]}

TOTAL SCORE(100%):

{list_of_total[que]}

\n

\n

REMARKS: {list_of_remarks[que]}

\n

\n
\t\n \n \n \n \n '''\n # msgAlternative = MIMEMultipart('alternative')\n # message.attach(msgAlternative)\n text = MIMEText(html,\"html\")\n # plain2 = MIMEText(msg, \"plain\")\n # text = text.format(12, 23, 23)\n # message.attach(plain2)\n # msgText = MIMEText('Some HTML text and an image.

<br><img src=\"cid:image1\"><br>KPI-DATA!', 'html')\n # msgAlternative.attach(msgText)\n # fp = open('banner_logo.jpg', 'rb') # Read image\n # msgImage = MIMEImage(fp.read())\n # fp.close()\n\n # Define the image's ID as referenced above\n # msgImage.add_header('Content-ID', '<image1>')\n # message.attach(msgImage)\n\n # msg = plain1+text\n message.attach(text)\n # mail.sendmail('from', 'to', html.format(score1=\"20\" score2=\"10\", score3=\"30\"))\n # server.sendmail(from_, to_, message.as_string())\n server.sendmail(from_, list_of_emails[que], message.as_string())\n print(\"mail sent successfully\")\n server.quit()\n except Exception as e:\n print(e)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"547450726","text":"'''\nConverts a set of three psoition vectors into state vector using Gibb's method\nfrom which orbital elements can be found easily.\n'''\n\nimport sys\nimport os.path\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nimport numpy as np\nimport math\nimport re\n\nsqrt = np.sqrt\npi = np.pi\nmeu = 398600.4418\n\nclass Gibbs(object):\n\n @classmethod\n def convert_list(self, vec):\n '''\n Type casts the input data for the ease of use.\n\n Converts the values of the input list with string datatype into float\n for the ease of furthur computation.\n\n Args:\n vec (list): input vector\n\n Returns:\n list: vector converted to float values\n '''\n return [float(vec[1]), float(vec[2]), float(vec[3])]\n\n @classmethod\n def find_length(self, path):\n '''\n Finds the length of the input file.\n\n Calculates the length of the file with the given path containing all the\n position vectors. File should contain a header line describing all of its\n attributes in a single line only. This function removes the header line\n before it calculates file length. If the file does not contains the header\n line then the first line of data will get removed and you have to add 1\n to the output after it returns the result.\n\n Args:\n path (str): file path\n\n Returns:\n int: length of file\n '''\n length = open(path, 'r')\n length.readline() # it is used to remove the header line\n\n size = 0\n while(length.readline()):\n size = size + 1\n\n return size\n\n def read_file(self, path):\n '''\n Invokes the Gibb's implementation and stores the result in a list.\n\n Read the file with the given path and forms a set of three position vectors\n then applies Gibb's Method on that set and computes state vector for every\n set. After computing state vectors it stores the result into a list. 
Now,\n these state vectors can be used to find orbital elements.\n\n Args:\n path (str): path to input file\n\n Returns:\n numpy.ndarray: list of all pair of position and velocity vector\n '''\n myfile = open(path, 'r')\n myfile.readline() # it is used to remove the header line\n\n size = self.find_length(path)\n upto = size-2 # size-2\n final = np.zeros((upto, 6))\n\n r1 = self.convert_list(re.split('\\t|\\n', myfile.readline()))\n r2 = self.convert_list(re.split('\\t|\\n', myfile.readline()))\n\n i = 0\n while(i < upto):\n r3 = self.convert_list(re.split('\\t|\\n', myfile.readline()))\n v2 = self.gibbs(r1, r2, r3)\n ele = self.orbital_elements(r2, v2)\n print(ele)\n data = [r2[0], r2[1], r2[2], v2[0], v2[1], v2[2]]\n final[i,:] = data\n\n r1 = r2\n r2 = r3\n i = i + 1\n\n return final\n\n @classmethod\n def magnitude(self, vec):\n '''\n Computes magnitude of the input vector.\n\n Args:\n vec (list): vector\n\n Returns:\n float: magnitude of vector\n '''\n mag_vec = math.sqrt(vec[0]**2 + vec[1]**2 + vec[2]**2)\n return mag_vec\n\n @classmethod\n def dot_product(self, a, b):\n '''\n Computes dot product of two vectors. Multiplies corresponding axis with\n each other and then adds them. Returns a single value.\n\n Args:\n a (list/array): first vector\n b (list/array): second vector\n\n Returns:\n float: dot product of given vectors\n '''\n return a[0]*b[0]+a[1]*b[1]+a[2]*b[2]\n\n @classmethod\n def cross_product(self, a, b):\n '''\n Computes cross product of the given vectors. Returns a vector.\n\n Args:\n a (list/array): first vector\n b (list/array): second vector\n\n Returns:\n list/array: cross product of given vectors\n '''\n return [a[1]*b[2] - b[1]*a[2], (-1)*(a[0]*b[2] - b[0]*a[2]), a[0]*b[1] - b[0]*a[1]]\n\n @classmethod\n def operate_vector(self, a, b, flag):\n '''\n Adds or subtracts input vectors based on the flag value.\n\n If flag is 1 then add both vectors with corresponding values else if\n flag is 0 (zero) then subtract two vectors with corresponding values.\n Returns a vector.\n\n Args:\n a (list/array): first vector\n b (list/array): second vector\n flag (int): checks for operation (addition/subtraction)\n\n Returns:\n list/array: sum/difference of vector based on flag value\n '''\n if(flag == 1):\n return [a[0]+b[0], a[1]+b[1], a[2]+b[2]]\n elif(flag == 0):\n return [a[0]-b[0], a[1]-b[1], a[2]-b[2]]\n\n @classmethod\n def unit(self, vec):\n '''\n Finds unit vector of the given vector. Divides each value of vector by\n its magnitude. Returns a vector.\n\n Args:\n vec (list): input vector\n\n Returns:\n list: unit vector\n '''\n mag = self.magnitude(vec)\n return [i/mag for i in vec]\n\n @classmethod\n def gibbs(self, r1, r2, r3):\n '''\n Computes state vector from the given set of three position vectors using\n Gibb's implementation.\n\n Computes velocity vector (part of state vector) using Gibb's Method\n and takes r2 (input argument) as its position vector (part of state\n vector). 
Both combined forms state vector.\n\n Args:\n r1 (list): first position vector\n r2 (list): second position vector\n r3 (list): third position vector\n\n Returns:\n list: velocity vector\n '''\n mag_r1 = self.magnitude(r1)\n mag_r2 = self.magnitude(r2)\n mag_r3 = self.magnitude(r3)\n\n c12 = self.cross_product(r1, r2)\n c23 = self.cross_product(r2, r3)\n c31 = self.cross_product(r3, r1)\n\n # '''For checking colplanarity'''\n # unit_c23 = self.unit(c23)\n # unit_r1 = self.unit(r1)\n # check = self.dot_product(unit_r1, unit_c23)\n\n r1c23 = [mag_r1*i for i in c23]\n r2c31 = [mag_r2*i for i in c31]\n r3c12 = [mag_r3*i for i in c12]\n N = [r1c23[0]+r2c31[0]+r3c12[0], r1c23[1]+r2c31[1]+r3c12[1], r1c23[2]+r2c31[2]+r3c12[2]]\n mag_N = self.magnitude(N)\n\n D = self.operate_vector(c12, self.operate_vector(c23, c31, 1), 1)\n mag_D = self.magnitude(D)\n\n vec1 = [(mag_r2-mag_r3)*i for i in r1]\n vec2 = [(mag_r3-mag_r1)*i for i in r2]\n vec3 = [(mag_r1-mag_r2)*i for i in r3]\n S = self.operate_vector(vec1, self.operate_vector(vec2, vec3, 1), 1)\n\n term1 = math.sqrt(meu/(mag_N*mag_D))\n var1 = self.cross_product(D, r2)\n var2 = [i/mag_r2 for i in var1]\n term2 = self.operate_vector(var2, S, 1)\n v2 = [term1*i for i in term2]\n\n return v2\n\n @classmethod\n def orbital_elements(self, r, v):\n '''\n Computes orbital elements from state vector.\n\n Orbital elements is a set of six parameters which are semi-major axis,\n inclination, right ascension of the ascending node, eccentricity,\n argument of perigee and mean anomaly.\n\n Args:\n r (list): position vector\n v (list): velocity vector\n\n Returns:\n list: set of six orbital elements\n '''\n mag_r = self.magnitude(r)\n mag_v = self.magnitude(v)\n vr = self.dot_product(r, v)/mag_r\n h = self.cross_product(r, v)\n mag_h = self.magnitude(h)\n inclination = math.acos(h[2]/mag_h)*(180/pi)\n\n N = self.cross_product([0,0,1], h)\n mag_N = self.magnitude(N)\n\n ascension = math.acos(N[0]/mag_N)*(180/pi)\n if(N[1] < 0):\n ascension = 360 - ascension\n\n var1 = [(mag_v**2 - meu/mag_r)*i for i in r]\n var2 = [self.dot_product(r, v)*i for i in v]\n vec = self.operate_vector(var1, var2, 0)\n eccentricity = [i/meu for i in vec]\n mag_e = self.magnitude(eccentricity)\n\n perigee = math.acos(self.dot_product(N,eccentricity)/(mag_N*mag_e))*(180/pi)\n if(eccentricity[2] < 0):\n perigee = 360 - perigee\n\n anomaly = math.acos(self.dot_product(eccentricity,r)/(mag_e*mag_r))*(180/pi)\n if(vr < 0):\n anomaly = 360 - anomaly\n\n rp = mag_h**2/(meu*(1+mag_e))\n ra = mag_h**2/(meu*(1-mag_e))\n axis = (rp+ra)/2\n\n return [axis, inclination, ascension, mag_e, perigee, anomaly]\n\n# if __name__ == \"__main__\":\n# filename = \"orbit_simulated_a6801000.0_ecc0.000515_inc134.89461080388952_raan112.5156_aop135.0415_ta225.1155_jit0.0_dt1.0_gapno_1502628669.3819425.csv\"\n# path = \"../example_data/SimulatedCSV\" + filename\n#\n# obj = Gibbs()\n# vector = obj.read_file(path)\n# del(obj)\n","sub_path":"orbitdeterminator/kep_determination/gibbsMethod.py","file_name":"gibbsMethod.py","file_ext":"py","file_size_in_byte":9043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"329100177","text":"import xml.etree.ElementTree as etree\nfile_names = []\nbucket_path = \"https://s3.eu-central-1.amazonaws.com/deutsche-boerse-xetra-pds/\"\ntree = etree.parse('deutsche-boerse-xetra-pds')\nroot = tree.getroot()\n\n\ndef write_files():\n import urllib.request\n import os\n for file_name in file_names:\n url = bucket_path + file_name\n response 
=urllib.request.urlopen(url)\n os.makedirs(os.path.dirname(\"../data/\"+file_name), exist_ok=True)\n f = open(\"../data/\" + file_name, 'w+')\n data =response.read()\n text = data.decode('utf-8')\n f.write(text)\n f.close()\n print(file_name + \" completed!\")\n\n\nfor element in root:\n for child in element:\n file_names.append(child.text)\n print(child.text)\n break\n\nwrite_files()\n\n\n","sub_path":"util/fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"472063748","text":"import mcpi.minecraft as minecraft;\nimport mcpi.block as block;\nimport math;\n\nmc = minecraft.Minecraft.create(); #address=\"localhost\",port=4711)\nplayerPos=mc.player.getPos();\nplayerTile=mc.player.getTilePos();\n\nmc.postToChat(\"API Test!\");\n\nentityIds=mc.getPlayerEntityIds();\nFirstEntityId=entityIds[0];\n#playerAngle=mc.player.getRotation();\n\nplayerAngle=0;\n\nmc.player.setPos(playerPos.x,playerPos.y+2,playerPos.z);\n\nroof_length=9\nroof_width=6\n\n#mc.setBlocks(playerPos.x,playerPos.y,playerPos.z,playerPos.x+roof_width-1,playerPos.y+5,playerPos.z+roof_length-1,block.AIR);\n\n#mc.setBlocks(playerPos.x,playerPos.y,playerPos.z,playerPos.x+roof_width-1,playerPos.y,playerPos.z+roof_length-1,block.WOOD_PLANKS);\n\nfor y in range(0,roof_length):\n\tfor x in range(0,roof_width):\n\t\tmc.setBlock(playerPos.x+x,playerPos.y+x,playerPos.z+y,block.Block(block.STAIRS_WOOD.id,0));\n\nmc.setBlocks(playerPos.x+roof_width-1,playerPos.y,playerPos.z,playerPos.x+roof_width-1,playerPos.y+roof_width-2,playerPos.z+roof_length-1,block.WOOD_PLANKS);\n# LOG mc.setBlocks(playerPos.x+roof_width-1,playerPos.y,playerPos.z,playerPos.x+roof_width-1,playerPos.y,playerPos.z+roof_length-1,block.Block(block.WOOD.id,1));\n\nfor i in range(0,roof_width-1):\n\tmc.setBlocks(playerPos.x+roof_width-1,playerPos.y+i,playerPos.z,playerPos.x+1+i,playerPos.y+i,playerPos.z,block.WOOD_PLANKS);\n\tmc.setBlocks(playerPos.x+roof_width-1,playerPos.y+i,playerPos.z+roof_length-1,playerPos.x+1+i,playerPos.y+i,playerPos.z+roof_length-1,block.WOOD_PLANKS);\n","sub_path":"roof.py","file_name":"roof.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"22366457","text":"import argparse\nimport os\nimport sys\n\nfrom .context import Context\nfrom .input import PARSERS\nfrom .output import RENDERERS\nfrom .template import Template, determine_format\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='A YAML to YAML preprocessor.',\n prog='emrichen',\n epilog='Variable precedence: -D > -e > -f > !Defaults',\n )\n parser.add_argument(\n 'template_file',\n nargs='?',\n type=argparse.FileType('r'),\n default=sys.stdin,\n help='The YAML template to process. If unspecified, the template is read from stdin.',\n )\n\n parser.add_argument(\n '--template-format',\n choices=PARSERS,\n default=None,\n help=(\n 'Template format. If unspecified, attempted to be autodetected from the '\n 'input filename (but defaults to YAML).'\n ),\n )\n parser.add_argument(\n '--var-file',\n '-f',\n dest='var_files',\n metavar='VAR_FILE',\n type=argparse.FileType('r'),\n action='append',\n default=[],\n help=(\n 'A YAML file containing an object whose top-level keys will be defined as variables. 
'\n 'May be specified multiple times.'\n ),\n )\n parser.add_argument(\n '--define',\n '-D',\n metavar='VAR=VALUE',\n action='append',\n default=[],\n help='Defines a single variable. May be specified multiple times.',\n )\n parser.add_argument(\n '--output-file',\n '-o',\n type=argparse.FileType('w'),\n default=sys.stdout,\n help='Output file. If unspecified, the template output is written into stdout.',\n )\n parser.add_argument(\n '--output-format',\n choices=RENDERERS,\n default=None,\n help=(\n 'Output format. If unspecified, attempted to be autodetected from the '\n 'output filename (but defaults to YAML).'\n ),\n )\n parser.add_argument(\n '--include-env',\n '-e',\n action='store_true',\n default=False,\n help='Expose process environment variables to the template.',\n )\n return parser\n\n\ndef main(args=None):\n if args is None:\n args = sys.argv[1:]\n parser = get_parser()\n args = parser.parse_args(args)\n\n override_variables = dict(item.split('=', 1) for item in args.define)\n\n variable_sources = list(args.var_files)\n if args.include_env:\n variable_sources.append(os.environ)\n\n args.output_format = args.output_format or determine_format(\n getattr(args.output_file, 'name', None), RENDERERS, 'yaml'\n )\n\n context = Context(*variable_sources, **override_variables)\n template = Template.parse(args.template_file, format=args.template_format)\n output = template.render(context, format=args.output_format)\n\n args.output_file.write(output)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"emrichen/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"259154604","text":"from setuptools import setup, find_packages\nfrom os.path import dirname, abspath\n\nroot_location = dirname(abspath(__file__))\n\nwith open(root_location + '/requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"dotez\",\n version=\"0.0.3\",\n packages=find_packages(),\n entry_points={\n \"console_scripts\": [\n \"dotez = dotez.cli:main\",\n ],\n },\n install_requires=requirements,\n include_package_data=True,\n\n # metadata to display on PyPI\n author=\"Jiyang Tang\",\n author_email=\"tjy1018543509@gmail.com\",\n description=\"Python module for managing dotfiles easily\",\n keywords=\"dotfiles,git,backup\",\n url=\"http://github.com/tjysdsg/dot-easy/\",\n project_urls={\n \"Bug Tracker\": \"http://github.com/tjysdsg/dot-easy/issues\",\n \"Documentation\": \"http://github.com/tjysdsg/dot-easy/wiki\",\n \"Source Code\": \"http://github.com/tjysdsg/dot-easy\",\n },\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Utilities'\n # https://pypi.org/classifiers/\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"311540975","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import datasets\r\nfrom sklearn import cross_validation\r\nfrom sklearn import ensemble\r\nfrom sklearn import naive_bayes\r\nfrom sklearn.svm import LinearSVR\r\n\r\ndef load_data_regression():\r\n diabetes=datasets.load_diabetes()\r\n return cross_validation.train_test_split(diabetes.data, diabetes.target, test_size=0.25, random_state=0)\r\n\r\ndef load_data_classification():\r\n digits=datasets.load_digits()\r\n return cross_validation.train_test_split(digits.data, digits.target, 
test_size=0.25, random_state=0)\r\n\r\ndef test_AdaBoostRegressor(*data):\r\n X_train,X_test, Y_train,Y_test = data\r\n clf=ensemble.AdaBoostRegressor()\r\n clf.fit(X_train,Y_train)\r\n\r\n fig=plt.figure()\r\n ax=fig.add_subplot(1,1,1)\r\n estimates_num=len(clf.estimators_)\r\n X=range(1,estimates_num+1)\r\n ax.plot(list(X), list(clf.staged_score(X_train,Y_train)), label = 'trains score', marker='o')\r\n ax.plot(list(X), list(clf.staged_score(X_test,Y_test)), label = 'tests score', marker='*')\r\n ax.set_xlabel(r\"estimator num\")\r\n ax.set_ylabel(\"score\")\r\n ax.set_ylim(0, 1.0)\r\n ax.set_title(\"AdaBoostRegressor\")\r\n ax.legend(loc='best')\r\n plt.show()\r\n\r\ndef test_AdaBoostRegressor_base_classifier(*data):\r\n X_train,X_test, Y_train,Y_test = data\r\n fig=plt.figure()\r\n labels=['Decision Tree Regressor','linear SVR Regressor']\r\n regrs=[ensemble.AdaBoostRegressor(),ensemble.AdaBoostRegressor(base_estimator=LinearSVR(epsilon=0.01,C=100))]\r\n trains_score = []\r\n tests_score = []\r\n for i,regr in enumerate(regrs):\r\n ax=fig.add_subplot(2,1,i+1)\r\n regr.fit(X_train,Y_train)\r\n estimates_num=len(regr.estimators_)\r\n X=range(1,estimates_num+1)\r\n ax.plot(list(X), list(regr.staged_score(X_train,Y_train)), label = 'Trains score', marker='o')\r\n ax.plot(list(X), list(regr.staged_score(X_test,Y_test)), label = 'Tests score', marker='*')\r\n ax.set_xlabel(r\"learning rate\")\r\n ax.set_ylabel(\"score\")\r\n ax.set_ylim(-1, 1.0)\r\n ax.legend(loc='best')\r\n fig.suptitle(\"AdaBoostRegressor_base_classifier\")\r\n plt.show()\r\n\r\ndef test_AdaBoostRegressor_loss(*data):\r\n X_train,X_test, Y_train,Y_test = data\r\n fig=plt.figure()\r\n ax=fig.add_subplot(1,1,1)\r\n losses=['linear','square','exponential']\r\n trains_score = []\r\n tests_score = []\r\n for i,loss in enumerate(losses):\r\n ax=fig.add_subplot(2,1,i+1)\r\n clf=ensemble.AdaBoostRegressor(loss=loss,n_estimators=30)\r\n clf.fit(X_train,Y_train)\r\n estimates_num=len(clf.estimators_)\r\n X=range(1,estimates_num+1)\r\n ax.plot(list(X), list(clf.staged_score(X_train,Y_train)), label = '%s:Trains score'%loss, marker='o')\r\n ax.plot(list(X), list(clf.staged_score(X_test,Y_test)), label = '%s:Tests score'%loss, marker='*')\r\n ax.set_xlabel(r\"estimator num\")\r\n ax.set_ylabel(\"score\")\r\n ax.set_ylim(0,1)\r\n ax.set_title(\"AdaBoostRegressor with loss\")\r\n ax.legend(loc='best')\r\n plt.show()\r\n\r\ndef test_AdaBoostRegressor_learning_rate(*data):\r\n X_train,X_test, Y_train,Y_test = data\r\n fig=plt.figure()\r\n learning_rates=np.linspace(0.01,1)\r\n algorithms=['SAMME','SAMME.R']\r\n trains_score = []\r\n tests_score = []\r\n ax=fig.add_subplot(1,1,1)\r\n print(\"learning_rates = \",enumerate(learning_rates))\r\n for i,learn_rate in enumerate(learning_rates):\r\n clf=ensemble.AdaBoostRegressor(learning_rate=learn_rate,n_estimators=500)\r\n clf.fit(X_train,Y_train)\r\n trains_score.append(clf.score(X_train,Y_train))\r\n tests_score.append(clf.score(X_test,Y_test))\r\n ax.plot(learning_rates, trains_score, label = 'trains score', marker='o')\r\n ax.plot(learning_rates, tests_score, label = 'tests score', marker='*')\r\n ax.set_xlabel(r\"learning rate\")\r\n ax.set_ylabel(\"score\")\r\n ax.legend(loc='best')\r\n ax.set_title(\"AdaBoostRegressor with learning rate\")\r\n plt.show()\r\n\r\nX_train,X_test, Y_train,Y_test = load_data_classification()\r\ntest_AdaBoostRegressor_learning_rate(X_train,X_test, Y_train,Y_test)\r\ntest_AdaBoostRegressor_base_classifier(X_train,X_test, 
Y_train,Y_test)\r\ntest_AdaBoostRegressor_loss(X_train,X_test, Y_train,Y_test)\r\ntest_AdaBoostRegressor(X_train,X_test, Y_train,Y_test)","sub_path":"8.2 AdaBoostRegression.py","file_name":"8.2 AdaBoostRegression.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20578397","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport numpy as np\nimport time\nfrom models.losses import FocalLoss, RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss, CrossEntropy2d\nfrom models.decode import ctdet_decode\nfrom models.utils import _sigmoid\nimport cv2\n\nclass CtdetLoss(torch.nn.Module):\n def __init__(self, opt):\n super(CtdetLoss, self).__init__()\n self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()\n self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \\\n RegLoss() if opt.reg_loss == 'sl1' else None\n self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \\\n NormRegL1Loss() if opt.norm_wh else \\\n RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg\n self.crit_seg = CrossEntropy2d()\n self.opt = opt\n\n def forward(self, outputs, batch):\n opt = self.opt\n hm_loss, wh_loss, off_loss, sem_seg_loss = 0, 0, 0, 0\n for s in range(opt.num_stacks):\n output = outputs[s]\n if not opt.mse_loss:\n output['hm'] = _sigmoid(output['hm'])\n\n if opt.eval_oracle_hm:\n output['hm'] = batch['hm']\n if opt.eval_oracle_wh:\n output['wh'] = torch.from_numpy(gen_oracle_map(\n batch['wh'].detach().cpu().numpy(), \n batch['ind'].detach().cpu().numpy(), \n output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)\n if opt.eval_oracle_offset:\n output['reg'] = torch.from_numpy(gen_oracle_map(\n batch['reg'].detach().cpu().numpy(), \n batch['ind'].detach().cpu().numpy(), \n output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)\n hm_loss += self.crit(output['hm'], batch['hm'], batch['unconf_hm']) / opt.num_stacks\n if opt.wh_weight > 0:\n if opt.dense_wh:\n mask_weight = batch['dense_wh_mask'].sum() + 1e-4\n wh_loss += (\n self.crit_wh(output['wh'] * batch['dense_wh_mask'],\n batch['dense_wh'] * batch['dense_wh_mask']) / \n mask_weight) / opt.num_stacks\n elif opt.cat_spec_wh:\n wh_loss += self.crit_wh(\n output['wh'], batch['cat_spec_mask'],\n batch['ind'], batch['cat_spec_wh']) / opt.num_stacks\n else:\n wh_loss += self.crit_reg(\n output['wh'], batch['reg_mask'],\n batch['ind'], batch['wh']) / opt.num_stacks\n if opt.task == 'ctdet_semseg':\n sem_seg_loss = torch.mean(self.crit_seg(output['seg'], batch['seg'][0], batch['weight_seg'][0]))\n if opt.reg_offset and opt.off_weight > 0:\n off_loss += self.crit_reg(output['reg'], batch['reg_mask'],\n batch['ind'], batch['reg']) / opt.num_stacks\n \n loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \\\n opt.off_weight * off_loss + sem_seg_loss\n loss_stats = {'loss': loss, 'hm_loss': hm_loss,\n 'wh_loss': wh_loss, 'off_loss': off_loss, 'sem_seg_loss': sem_seg_loss}\n return loss, loss_stats","sub_path":"src/lib/trains/ctdet_loss.py","file_name":"ctdet_loss.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"442131472","text":"# Copyright (c) 2021, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport logging\nimport traceback\nfrom pathlib import Path\nfrom typing import Optional\n\nimport click\n\nfrom model_navigator.cli.convert_model import ConversionSetConfig\nfrom model_navigator.cli.spec import (\n ComparatorConfigCli,\n ConversionSetHelmChartConfigCli,\n DatasetProfileConfigCli,\n ModelConfigCli,\n ModelSignatureConfigCli,\n TritonModelInstancesConfigCli,\n TritonModelOptimizationConfigCli,\n TritonModelSchedulerConfigCli,\n)\nfrom model_navigator.converter.config import ComparatorConfig, DatasetProfileConfig\nfrom model_navigator.framework import SUFFIX2FRAMEWORK\nfrom model_navigator.kubernetes import ChartGenerator, HelmChartGenerationResult\nfrom model_navigator.kubernetes.triton import TritonServer\nfrom model_navigator.log import init_logger, log_dict\nfrom model_navigator.model import ModelConfig, ModelSignatureConfig\nfrom model_navigator.results import ResultsStore, State, Status\nfrom model_navigator.triton.config import (\n TritonModelInstancesConfig,\n TritonModelOptimizationConfig,\n TritonModelSchedulerConfig,\n)\nfrom model_navigator.utils.cli import common_options, options_from_config\nfrom model_navigator.utils.workspace import Workspace\nfrom model_navigator.validators import run_command_validators\n\nLOGGER = logging.getLogger(\"helm_chart_create\")\n\n\n@click.command(name=\"helm-chart-create\", help=\"Create helm chart for given configuration\")\n@common_options\n@options_from_config(ModelConfig, ModelConfigCli)\n@click.option(\n \"--charts-repository\",\n type=click.Path(writable=True),\n required=True,\n help=\"Path to Helm Charts repository.\",\n)\n@click.option(\"--chart-name\", required=True, help=\"Name of the chart in Helm Charts repository.\")\n@click.option(\"--chart-version\", required=False, help=\"Version of the chart in Helm Charts repository.\")\n@options_from_config(ModelSignatureConfig, ModelSignatureConfigCli)\n@options_from_config(ConversionSetConfig, ConversionSetHelmChartConfigCli)\n@options_from_config(ComparatorConfig, ComparatorConfigCli)\n@options_from_config(DatasetProfileConfig, DatasetProfileConfigCli)\n@options_from_config(TritonModelOptimizationConfig, TritonModelOptimizationConfigCli)\n@options_from_config(TritonModelSchedulerConfig, TritonModelSchedulerConfigCli)\n@options_from_config(TritonModelInstancesConfig, TritonModelInstancesConfigCli)\n@click.pass_context\ndef helm_chart_create_cmd(\n ctx,\n verbose: bool,\n workspace_path: str,\n override_workspace: bool,\n charts_repository: str,\n chart_name: str,\n chart_version: Optional[str],\n container_version: str,\n triton_docker_image: Optional[str],\n framework_docker_image: Optional[str],\n **kwargs,\n):\n init_logger(verbose=verbose)\n LOGGER.debug(f\"Running '{ctx.command_path}' with config_path: {kwargs.get('config_path')}\")\n\n run_command_validators(\n ctx.command.name,\n configuration={\n \"verbose\": verbose,\n \"workspace_path\": workspace_path,\n 
\"override_workspace\": override_workspace,\n \"charts_repository\": charts_repository,\n \"chart_name\": chart_name,\n \"chart_version\": chart_version,\n \"container_version\": container_version,\n \"triton_docker_image\": triton_docker_image,\n \"framework_docker_image\": framework_docker_image,\n **kwargs,\n },\n )\n\n workspace = Workspace(workspace_path)\n charts_repository = Path(charts_repository).resolve()\n\n src_model_config = ModelConfig.from_dict(kwargs)\n src_model_signature_config = ModelSignatureConfig.from_dict(kwargs)\n conversion_set_config = ConversionSetConfig.from_dict(kwargs)\n comparator_config = ComparatorConfig.from_dict(kwargs)\n dataset_profile_config = DatasetProfileConfig.from_dict(kwargs)\n optimization_config = TritonModelOptimizationConfig.from_dict(kwargs)\n scheduler_config = TritonModelSchedulerConfig.from_dict(kwargs)\n instances_config = TritonModelInstancesConfig.from_dict(kwargs)\n\n framework = SUFFIX2FRAMEWORK[src_model_config.model_path.suffix]\n framework_docker_image = framework_docker_image or framework.container_image(container_version)\n triton_docker_image = triton_docker_image or TritonServer.container_image(container_version)\n\n if verbose:\n log_dict(\n \"helm_chart_create_cmd args:\",\n {\n **dataclasses.asdict(src_model_config),\n **dataclasses.asdict(src_model_signature_config),\n **dataclasses.asdict(conversion_set_config),\n **dataclasses.asdict(comparator_config),\n **dataclasses.asdict(dataset_profile_config),\n **dataclasses.asdict(optimization_config),\n **dataclasses.asdict(scheduler_config),\n **dataclasses.asdict(instances_config),\n \"charts_repository\": charts_repository,\n \"chart_name\": chart_name,\n \"workspace_path\": workspace.path,\n \"override_workspace\": override_workspace,\n \"container_version\": container_version,\n \"framework_docker_image\": framework_docker_image,\n \"triton_docker_image\": triton_docker_image,\n \"verbose\": verbose,\n },\n )\n\n output_path = charts_repository / chart_name\n\n LOGGER.debug(\"Obtaining conversion config for Helm Chart\")\n conversion_set_config_lst = list(conversion_set_config)\n conversion_config = conversion_set_config_lst[0]\n\n try:\n helm_chart_generator = ChartGenerator(\n triton_docker_image=triton_docker_image, framework_docker_image=framework_docker_image\n )\n helm_chart_generation_result = helm_chart_generator.run(\n src_model=src_model_config,\n src_model_signature_config=src_model_signature_config,\n conversion_config=conversion_config,\n comparator_config=comparator_config,\n dataset_profile_config=dataset_profile_config,\n optimization_config=optimization_config,\n scheduler_config=scheduler_config,\n instances_config=instances_config,\n output_path=output_path,\n chart_version=chart_version,\n )\n except Exception:\n message = traceback.format_exc()\n LOGGER.debug(f\"Encountered exception \\n{message}\")\n return HelmChartGenerationResult(\n status=Status(state=State.FAILED, message=message),\n triton_docker_image=triton_docker_image,\n framework_docker_image=framework_docker_image,\n src_model_config=src_model_config,\n src_model_signature_config=src_model_signature_config,\n conversion_config=conversion_config,\n comparator_config=comparator_config,\n dataset_profile_config=dataset_profile_config,\n optimization_config=optimization_config,\n scheduler_config=scheduler_config,\n instances_config=instances_config,\n helm_chart_dir_path=None,\n )\n\n results_store = ResultsStore(workspace)\n results_store.dump(ctx.command.name.replace(\"-\", \"_\"), 
[helm_chart_generation_result])\n\n return helm_chart_generation_result\n","sub_path":"model_navigator/cli/helm_chart_create.py","file_name":"helm_chart_create.py","file_ext":"py","file_size_in_byte":7830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118584454","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom wxbot import *\nimport codecs\nclass MyWXBot(WXBot):\n \n def handle_msg_all(self, msg): \n if msg['msg_type_id'] == 3 and msg['content']['type'] == 0:\n if 'detail' in msg['content']:\n print(msg)\n if msg['content']['data'] == \"BotStart1\":\n print(\"I'm setting group1 to \" + msg['user']['id'])\n global group1\n group1 = msg['user']['id']\n elif msg['content']['data'] == \"BotStart2\":\n global group2\n group2 = msg['user']['id']\n if msg['user']['id'] == group1:\n self.send_msg_by_uid(msg['content']['data'] + ' (From: ' + msg['content']['user']['name'] + ')', group2)\n elif msg['user']['id'] == group2:\n self.send_msg_by_uid(msg['content']['data'] + ' (From: ' + msg['content']['user']['name'] + ')', group1)\n else:\n print(msg['user']['id'])\n\n\ndef main():\n bot = MyWXBot()\n bot.DEBUG = True\n bot.conf['qr'] = 'png'\n bot.run()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"360177161","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/otfbot/plugins/ircClient/commands.py\n# Compiled at: 2011-04-22 06:35:42\n\"\"\"\nReact to !commands with text from a commands.txt file\n\"\"\"\nfrom otfbot.lib import chatMod\nfrom otfbot.lib import functions\nfrom otfbot.lib.pluginSupport.decorators import callback\nimport string, re, random, os\n\nclass Plugin(chatMod.chatMod):\n\n def __init__(self, bot):\n self.bot = bot\n self.channels = []\n self.mtime = 0\n\n @callback\n def connectionMade(self):\n self.start()\n\n @callback\n def joined(self, channel):\n filename = self.bot.config.getPath('file', datadir, 'commands.txt', 'commands', self.bot.network, channel)\n enc = self.bot.config.get('fileencoding', 'iso-8859-15', 'commands', self.bot.network, channel)\n self.commands[channel] = functions.loadProperties(filename, True, enc)\n\n @callback\n def command(self, user, channel, command, options):\n user = user.getNick()\n if self.bot.config.getBool('autoReload', True, 'commands', self.bot.network, channel):\n net_file = self.bot.config.getPath('file', datadir, 'commands.txt', 'commands', self.bot.network)\n network_mtime = os.stat(net_file).st_mtime\n global_file = self.bot.config.getPath('file', datadir, 'commands.txt', 'commands')\n general_mtime = os.stat(global_file).st_mtime\n if self.mtime < network_mtime or self.mtime < general_mtime:\n self.reload()\n self.mtime = max(network_mtime, general_mtime)\n if user != self.bot.nickname:\n answer = self.respond(channel, user, command.lower(), options)\n if answer != '':\n if answer[0] == ':':\n self.bot.sendmsg(channel, answer[1:])\n else:\n self.bot.sendme(channel, answer)\n\n def start(self):\n self.register_ctl_command(self.reload)\n self.commands = {}\n enc = self.bot.config.get('fileencoding', 'iso-8859-15', 'commands')\n file = self.bot.config.getPath('file', datadir, 'commands.txt', 'commands')\n self.commands['general'] = 
functions.loadProperties(file, True, enc)\n enc = self.bot.config.get('fileencoding', 'iso-8859-15', 'commands', self.bot.network)\n file = self.bot.config.getPath('file', datadir, 'commands.txt', 'commands', self.bot.network)\n self.commands['network'] = functions.loadProperties(file, True, enc)\n for chan in self.bot.channels:\n self.joined(chan)\n\n def reload(self):\n self.start()\n return 'reloaded commands'\n\n def getCommand(self, channel, cmd):\n if channel not in self.commands:\n self.commands[channel] = {}\n if channel in self.commands and cmd in self.commands[channel]:\n return self.commands[channel][cmd]\n else:\n if 'network' in self.commands and cmd in self.commands['network']:\n return self.commands['network'][cmd]\n if 'general' in self.commands and cmd in self.commands['general']:\n return self.commands['general'][cmd]\n return ''\n\n def respond(self, channel, user, command, options):\n \"\"\"\n respond to a command, substituting USER by the actual user\n and OTHER by the given options\n\n >>> c=Plugin(None)\n >>> c.commands={} #just for the example to work\n >>> c.commands['general']={\"test\": [\"USER wanted a test\"],\n >>> \"test_\": [\"USER wanted to show OTHER how it works\"]}\n >>> c.commands['network']={}\n >>> #example begins here:\n >>> c.respond(\"\", \"testuser\", \"test\", \"\")\n 'testuser wanted a test'\n >>> c.respond(\"\", \"testuser\", \"test\", \"you\")\n 'testuser wanted to show you how it works'\n \"\"\"\n answer = ''\n if len(options) >= 1:\n options = options.rstrip()\n answers = self.getCommand(channel, command + '_')\n if len(answers):\n answer = random.choice(answers)\n answer = re.sub('OTHER', options, answer)\n else:\n answers = self.getCommand(channel, command)\n if len(answers):\n answer = random.choice(answers)\n answer = re.sub('USER', user, answer)\n if len(answer) > 0 and answer[(-1)] == '\\n':\n return answer[0:-1]\n else:\n return answer","sub_path":"pycfiles/otfbot-1.0.0-py2.6/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"525608777","text":"#kadai11b 16D8101021C 野口大輔 2017-12-11\n\nimport numpy as np\n\nA=np.loadtxt(\"matrix.csv\",delimiter=\",\",dtype=int)\n\nprint(\"行数:\",A.shape[0])\nprint(\"列数:\",A.shape[1])\n\nf=np.zeros((A.shape[0],A.shape[1]))\n\nf[0][0]=A[0][0]\nfor q in range(A.shape[1]-1):\n f[0][q+1]=f[0][q]+A[0][q+1]\n\nfor p in range(A.shape[0]-1):\n f[p+1][0]=f[p][0]+A[p+1][0]\n\n#print(f)\nfor q in range(1,A.shape[1]):\n for p in range(1,A.shape[0]):\n f[p][q]=min(f[p-1][q]+A[p][q],f[p][q-1]+A[p][q])\n \n#print(\"\\n\",f)\nprint(\"最短距離の総和は\",int(f[A.shape[0]-1][A.shape[1]-1]))\n\n\"\"\"\n(py361) [16D8101021C@ise31c ~]$ python kadai11b.py\n行数: 79\n列数: 80\n最短距離の総和は 421739\n\"\"\"\n","sub_path":"reports/kadai11b.py","file_name":"kadai11b.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"213436305","text":"import dlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\nimport math\nimport os\n\ndef distances(points):\n dist = []\n for i in range(points.shape[0]):\n for j in range(points.shape[0]):\n p1 = points[i,:]\n p2 = points[j,:] \n dist.append( math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) )\n return dist\n\ndef get_bounding_box(rect):\n\t# take a bounding predicted by dlib and convert it\n\t# to the format (x, y, w, h) \n\tx = rect.left()\n\ty = rect.top()\n\tw = 
rect.right() - x\n\th = rect.bottom() - y \n\treturn x, y, w, h\n\ndef shape_to_np(shape, num_coords, dtype=\"int\"):\n\t# initialize the list of (x, y)-coordinates\n\tcoords = np.zeros((num_coords, 2), dtype=dtype)\n \t# loop over the facial landmarks and convert them\n\t# to a 2-tuple of (x, y)-coordinates\n\tfor i in range(0, num_coords):\n\t\tcoords[i] = (shape.part(i).x, shape.part(i).y) \n\t# return the list of (x, y)-coordinates\n\treturn coords\n\ndef get_landmarks(images, labels, save_directory=\"\", num_coords=5, to_save=False):\n \n print(\"Getting %d facial landmarks\" % num_coords)\n landmarks = []\n new_labels = []\n img_ct = 0\n \n if num_coords == 5:\n predictor_path = 'shape_predictor_5_face_landmarks.dat'\n else:\n predictor_path = 'shape_predictor_68_face_landmarks.dat'\n\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(predictor_path)\n\n for img, label in zip(images, labels):\n # Ask the detector to find the bounding boxes of each face. The 1 in the\n # second argument indicates that we should upsample the image 1 time. This\n # will make everything bigger and allow us to detect more faces.\n img_ct += 1\n detected_faces = detector(img, 1)\n for d in detected_faces:\n new_labels.append(label)\n x, y, w, h = get_bounding_box(d) \n # Get the landmarks/parts for the face in box d.\n points = shape_to_np(\n predictor(img, d), \n num_coords) \n \n dist = distances(points) \n landmarks.append(dist) \n \n if to_save:\n for (x_, y_) in points:\n cv2.circle(img, \n (x_, y_), \n 1, \n (0, 255, 0), \n -1)\n plt.figure()\n plt.imshow(img)\n if not os.path.isdir(save_directory):\n os.mkdir(save_directory)\n plt.savefig(save_directory + label + '%d.png' % img_ct)\n plt.close()\n \n if img_ct % 50 == 0:\n print(\"%d images with facial landmarks completed.\" % img_ct)\n \n return np.array(landmarks), np.array(new_labels)\n \n \n ","sub_path":"CAP 4103 - Mobile Biometrics/GL2-HATCHER/get_landmarks.py","file_name":"get_landmarks.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"78329636","text":"from flask import render_template, redirect, url_for\nfrom flask_login import login_required, current_user\nfrom app import app, db\nfrom forms.book_form import UpdateBookForm, DeleteBookForm\nfrom models.book import Book\n\n\n@app.route(\"/books\")\n@login_required\ndef books():\n books = current_user.books\n new_book_form = UpdateBookForm()\n\n return render_template(\"book/books.html\", title=\"Books\", books=books, new_book_form=new_book_form)\n\n\n@app.route(\"/books/new\", methods=[\"POST\"])\n@login_required\ndef new_book():\n form = UpdateBookForm()\n if form.validate_on_submit():\n name = form.name.data\n author = form.author.data\n pages_number = form.pages_number.data\n owner_id = current_user.id\n book = Book(name, author, pages_number, owner_id)\n\n db.session.add(book)\n db.session.commit()\n\n return redirect(url_for(\"books\"))\n\n return render_template(\"book/books.html\", new_book_form=form)\n\n\n@app.route(\"/books/update/\", methods=[\"GET\"])\n@login_required\ndef update_book(book_id):\n book = Book.query.filter_by(id=book_id).first()\n form = UpdateBookForm(obj=book)\n\n return render_template(\"book/update.html\", plant_id=book_id, form=form)\n\n\n@app.route(\"/books/update\", methods=[\"POST\"])\n@login_required\ndef do_update():\n form = UpdateBookForm()\n if form.validate_on_submit():\n book = 
Book.query.filter_by(id=form.id.data).first()\n book.name = form.name.data\n book.author = form.author.data\n book.pages_number = form.pages_number.data\n\n db.session.commit()\n\n return redirect(url_for(\"books\"))\n\n return render_template(\"book/update.html\", form=form)\n\n\n@app.route(\"/books/delete/\", methods=[\"GET\"])\n@login_required\ndef delete_book(book_id):\n book = Book.query.filter_by(id=book_id).first()\n form = DeleteBookForm(obj=book)\n\n return render_template(\"book/delete.html\", form=form, book=book)\n\n\n@app.route(\"/books/delete\", methods=[\"POST\"])\n@login_required\ndef do_delete():\n form = DeleteBookForm()\n if form.validate_on_submit():\n book = Book.query.filter_by(id=form.id.data).first()\n db.session.delete(book)\n db.session.commit()\n\n return redirect(url_for(\"books\"))\n\n return render_template(\"book/delete.html\", form=form)\n","sub_path":"controllers/books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"1885157","text":"# coding: utf-8\n\n\"\"\"\n Known Pose API\n\n Define and retrieve (accurate) semantic poses in a 2D environment. This version is a workaround for some not yet available features of the OpenAPI Specification v3.0 (e.g. `anyOf` as a response type and adding a `discriminator` for supporting polymorphism/composition). Therefore, ATM the sub-components BasicPose and InaccuratePose are reference objects inside their respective parent model. # noqa: E501\n\n The version of the OpenAPI document: 1.1.0\n Contact: Kai.Waelti@dfki.de\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass AccuratePose(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'inaccurate_pose': 'InaccuratePose',\n 'reference_scan': 'str'\n }\n\n attribute_map = {\n 'inaccurate_pose': 'inaccurate_pose',\n 'reference_scan': 'reference_scan'\n }\n\n def __init__(self, inaccurate_pose=None, reference_scan=None): # noqa: E501\n \"\"\"AccuratePose - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._inaccurate_pose = None\n self._reference_scan = None\n self.discriminator = None\n\n self.inaccurate_pose = inaccurate_pose\n self.reference_scan = reference_scan\n\n @property\n def inaccurate_pose(self):\n \"\"\"Gets the inaccurate_pose of this AccuratePose. # noqa: E501\n\n\n :return: The inaccurate_pose of this AccuratePose. # noqa: E501\n :rtype: InaccuratePose\n \"\"\"\n return self._inaccurate_pose\n\n @inaccurate_pose.setter\n def inaccurate_pose(self, inaccurate_pose):\n \"\"\"Sets the inaccurate_pose of this AccuratePose.\n\n\n :param inaccurate_pose: The inaccurate_pose of this AccuratePose. # noqa: E501\n :type: InaccuratePose\n \"\"\"\n if inaccurate_pose is None:\n raise ValueError(\"Invalid value for `inaccurate_pose`, must not be `None`\") # noqa: E501\n\n self._inaccurate_pose = inaccurate_pose\n\n @property\n def reference_scan(self):\n \"\"\"Gets the reference_scan of this AccuratePose. 
# noqa: E501\n\n Contents of the reference scan assembled as point cloud data following [RFC 4648 § 5](https://tools.ietf.org/html/rfc4648#section-5) # noqa: E501\n\n :return: The reference_scan of this AccuratePose. # noqa: E501\n :rtype: str\n \"\"\"\n return self._reference_scan\n\n @reference_scan.setter\n def reference_scan(self, reference_scan):\n \"\"\"Sets the reference_scan of this AccuratePose.\n\n Contents of the reference scan assembled as point cloud data following [RFC 4648 § 5](https://tools.ietf.org/html/rfc4648#section-5) # noqa: E501\n\n :param reference_scan: The reference_scan of this AccuratePose. # noqa: E501\n :type: str\n \"\"\"\n if reference_scan is None:\n raise ValueError(\"Invalid value for `reference_scan`, must not be `None`\") # noqa: E501\n\n self._reference_scan = reference_scan\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AccuratePose):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"python/known_pose_client/models/accurate_pose.py","file_name":"accurate_pose.py","file_ext":"py","file_size_in_byte":4769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"277264070","text":"import random\nimport subprocess\nfrom pathlib import Path\nfrom itertools import chain\n\nimport pytest\n\nfrom exam import (\n MultiChoiceAnswer,\n TrueFalseAnswer,\n MultiChoiceQuest,\n TrueFalseQuest,\n Exam,\n)\nfrom export import SerializeExam, RLInterface\nfrom utility import CSVReader\nfrom unit_helper import fd_input, save_mono_question_data\n\n\n@pytest.fixture\ndef dummy_exam():\n q1 = MultiChoiceQuest(\n \"question 1: correct is n. 2\", \"subject 1\", Path(\"resources/a.png\")\n )\n a1 = MultiChoiceAnswer(\"answer 1\", Path(\"resources/b.png\"))\n a2 = MultiChoiceAnswer(\"answer 2\", Path(\"resources/c.png\"))\n a3 = MultiChoiceAnswer(\"answer 3\", Path(\"resources/a.png\"))\n q1.answers = (a1, a2, a3)\n q1.correct_option = \"B\"\n\n q2 = MultiChoiceQuest(\n \"question 2: correct is n. 
1\", \"subject 1\", Path(\"resources/a.png\")\n )\n a1 = MultiChoiceAnswer(\"answer 1\")\n a2 = MultiChoiceAnswer(\"answer 2\")\n a3 = MultiChoiceAnswer(\"answer 3\")\n q2.answers = (a1, a2, a3)\n\n q3 = TrueFalseQuest(\"question 3: correct is True (first)\")\n a1 = TrueFalseAnswer(True)\n a2 = TrueFalseAnswer(False)\n q3.answers = (a1, a2)\n\n q4 = MultiChoiceQuest(\"question 4: no answer\", \"subject 2\", Path(\"resources/b.png\"))\n\n q5 = TrueFalseQuest(\"question 5: correct is False (first))\")\n a1 = TrueFalseAnswer(False)\n a2 = TrueFalseAnswer(True)\n q5.answers = (a1, a2)\n\n q6 = MultiChoiceQuest(\n \"question 6: correct is n. 3\", \"subject 4\", Path(\"resources/c.png\")\n )\n a1 = MultiChoiceAnswer(\"answer 1\")\n a2 = MultiChoiceAnswer(\"answer 2\")\n a3 = MultiChoiceAnswer(\"answer 3\")\n a4 = MultiChoiceAnswer(\"answer 4\")\n q6.add_answer(a1)\n q6.add_answer(a2)\n q6.add_answer(a3, is_correct=True)\n q6.add_answer(a4)\n dummy_ex = Exam(q1, q2, q3, q4, q5, q6)\n\n return dummy_ex\n\n\ndef test_print_have_a_look(tmp_path, dummy_exam):\n image_folder = Path(\"tests/unit/resources\")\n image_tmp_folder = tmp_path / image_folder.name\n image_tmp_folder.mkdir()\n for file in chain(image_folder.glob(\"*.png\"), image_folder.glob(\"*.jpg\")):\n data = file.read_bytes()\n copied_file = tmp_path / image_folder.name / file.name\n copied_file.write_bytes(data)\n\n random.seed()\n\n exam_file_path = tmp_path / \"Exam\"\n correction_file_path = tmp_path / \"Correction\"\n ex = dummy_exam\n folder = image_tmp_folder\n ex.add_path_parent(folder)\n ex.shuffle()\n serial_exam = SerializeExam(ex)\n pdf_interface = RLInterface(serial_exam.assignment(), exam_file_path)\n pdf_interface.build()\n pdf_interface = RLInterface(\n serial_exam.correction(),\n correction_file_path,\n top_item_bullet_type=\"A\",\n sub_item_bullet_type=\"1\",\n )\n pdf_interface.build()\n\n subprocess.Popen([\"evince\", str(exam_file_path)])\n subprocess.call([\"evince\", str(correction_file_path)])\n\n answer = fd_input(\"Is it correct (y)? 
\")\n\n assert answer == \"y\\n\"\n\n\ndef test_shuffle_from_csv(tmp_path):\n data_file = tmp_path / \"data.csv\"\n save_mono_question_data(data_file)\n\n data = CSVReader(str(data_file))\n rows = data.to_dictlist()\n\n exam = Exam()\n exam.attribute_selector = (\n \"question\",\n \"subject\",\n \"image\",\n \"void\",\n \"A\",\n \"void\",\n \"B\",\n \"void\",\n \"C\",\n \"void\",\n \"D\",\n \"void\",\n )\n\n exam.load(rows)\n exam.add_path_parent(data_file)\n random.seed(1)\n exam.shuffle()\n\n for question in exam.questions:\n assert question.correct_answer.text == \"a\"\n assert exam.questions[0]._answers[0].text == \"d\"\n assert exam.questions[1]._answers[0].text == \"d\"\n","sub_path":"tests/unit/test_quest2pdf.py","file_name":"test_quest2pdf.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"218270383","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef main():\n data = pd.read_csv('../results/batch_size_var.csv')\n\n epoch = 0\n prev_batch = 16\n x = []\n y = []\n curr_x = []\n curr_y = []\n\n for index, row in data.iterrows():\n if row['BS'] != prev_batch:\n x.append(curr_x)\n y.append(curr_y)\n curr_y = []\n curr_x = []\n prev_batch = row['BS']\n epoch = 0\n\n curr_y.append(row['MSE'])\n curr_x.append(epoch)\n\n epoch += 1\n\n x.append(curr_x)\n y.append(curr_y)\n\n ax = plt.axes()\n\n for tick in ax.xaxis.get_major_ticks():\n # tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\n for tick in ax.yaxis.get_major_ticks():\n # tick.label1.set_fontsize(fontsize)\n tick.label1.set_fontweight('bold')\n\n # ax.spines['bottom'].set_color('white')\n # ax.spines['top'].set_color('white')\n # ax.spines['right'].set_color('white')\n # ax.spines['left'].set_color('white')\n # ax.xaxis.label.set_color('white')\n # ax.yaxis.label.set_color('white')\n # ax.tick_params(axis='x', colors='white')\n # ax.tick_params(axis='y', colors='white')\n\n batch_sizes = [16, 32, 64, 128, 256, 512, 1024]\n for idx in range(len(batch_sizes)):\n label = \"Batch Size: \" + str(batch_sizes[idx])\n plt.plot(x[idx], y[idx], marker='o', label=label)\n\n # plt.yscale('log')\n\n plt.yticks(fontsize=13)\n plt.xticks(fontsize=13)\n\n plt.xlabel('Epochs', fontsize=14, weight='bold')\n plt.ylabel('Result', fontsize=14, weight='bold')\n plt.legend(loc=\"best\", framealpha=0.3)\n # plt.savefig('plots/batch_size_var.png', transparent=True)\n plt.tight_layout()\n plt.savefig('plots/batch_size_var.png')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/Experiments/week 8/plotter/plot_bs_var.py","file_name":"plot_bs_var.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"414408263","text":"from django.shortcuts import render, redirect\r\nfrom django.template import RequestContext\r\nfrom django.http import HttpResponse, HttpResponseRedirect, FileResponse, JsonResponse\r\nfrom django.urls import reverse\r\nfrom threading import Thread\r\nfrom botocore.client import Config\r\nfrom smart_open import open\r\nimport os\r\nimport random\r\nimport string\r\nimport time\r\nimport logging\r\nimport json\r\nimport boto3\r\n\r\nfrom .models import ServiceRecord\r\nfrom .forms import SplitForm\r\nfrom . 
import handler\r\nfrom .bucketManager import get_presigned_url\r\n\r\n\r\n# Homepage View\r\ndef home(request):\r\n return render(request, os.path.join('split', 'home.html'))\r\n\r\n\t\r\n# S3 URL Generator View, called by JavaScript only\r\ndef sign_s3(request):\r\n\t# Generate a random unique identifying tag for the file\r\n\tfile_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=7))\r\n\tfile_type = request.GET.get('file_type')\r\n \r\n\t# Create .txt that contains URL link and other details\r\n\twith open(os.path.join('media', 'urltexts', file_name + '.txt'), 'w', encoding=\"utf-8\") as txtfile:\r\n\t\ttoWrite = 'https://%s.s3.us-east-2.amazonaws.com/%s' % (S3_BUCKET, file_name)\r\n\t\ttxtfile.write(toWrite + '\\n')\r\n\t\ttxtfile.write(request.GET.get('file_name'))\r\n \r\n\t# Bucket initialization and presigned post request\r\n\tS3_BUCKET = os.environ.get('AWS_STORAGE_BUCKET_NAME')\r\n\tREGION = 'us-east-2'\r\n\r\n\ts3 = boto3.client('s3', region_name=REGION, config=Config(signature_version='s3v4'), \r\n\taws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'), aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'))\r\n\r\n\tpresigned_post = s3.generate_presigned_post(\r\n\t\tBucket = S3_BUCKET,\r\n\t\tKey = file_name,\r\n\t\tFields = {\"acl\": \"public-read\", \"Content-Type\": file_type},\r\n\t\tConditions = [\r\n\t\t{\"acl\": \"public-read\"},\r\n\t\t{\"Content-Type\": file_type}\r\n\t\t],\r\n\t\tExpiresIn = 3600\r\n\t)\r\n\treturn JsonResponse({\r\n\t\t'data': presigned_post,\r\n\t\t'url': 'https://%s.s3.us-east-2.amazonaws.com/%s' % (S3_BUCKET, file_name),\r\n\t\t'txtName': file_name\r\n\t})\r\n\r\n\t\r\n# Split Tool View\r\ndef split(request):\r\n if request.method == 'POST':\r\n\t\t# Process the form if it is a POST request\r\n form = SplitForm(request.POST, request.FILES)\r\n if form.is_valid():\r\n formData = form.cleaned_data\r\n audioFile = formData['avfile']\r\n times = formData['times']\r\n titles = formData['titles']\r\n extension = formData['fileType']\r\n fileName = formData['fileName']\r\n url = formData['url']\r\n tag = formData['keyFileInfo']\r\n\t \r\n\t\t\t# Save data as a service record (object)\r\n if (request.user.is_authenticated):\r\n record = ServiceRecord(filename=fileName, filetype=extension, user=request.user, timestamps=formData['timestamps'])\r\n else:\r\n record = ServiceRecord(filename=fileName, filetype=extension, timestamps=formData['timestamps'])\r\n record.save()\r\n\r\n\t\t\t# Starts the splitting process (sample at bottom). Multithreaded so that a redirect response can be immediately sent\r\n if extension == 'wav':\r\n Thread(target=handler.trimWAV, args=(url, times, titles, tag)).start()\r\n else:\r\n Thread(target=handler.trimMP3, args=(url, times, titles, tag)).start()\r\n return redirect('download', tag=tag)\r\n else:\r\n\t\t# Load the form if it is a GET request\r\n form = SplitForm()\r\n return render(request, os.path.join('split', 'ss-tool.html'), {'form': form})\r\n\r\n\t\r\n# Download View\r\ndef download(request, tag): \r\n # Checks periodically for completed file\r\n time.sleep(10)\r\n try:\r\n\t\t# The existence of the .txt file signals the completion of the process\r\n txtpath = os.path.join(\"media\", \"tempzips\", 'SS-' + tag + '.txt')\r\n with open(txtpath, 'rb') as f:\r\n\t\t\t# Time check ensures that an old file is not retrieved. 
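# Editor's note (inserted): the guard above treats the marker .txt as stale
# whenever its mtime predates the ~10 s polling sleep, so a leftover file from
# an earlier request never satisfies a new one. A self-contained sketch of the
# same freshness check (path handling and the 11 s threshold are illustrative
# assumptions mirroring the code above, not a library API):
import os
import time

def is_fresh(path, max_age_seconds=11):
    """Return True only if `path` exists and was modified within the window."""
    try:
        return time.time() - os.path.getmtime(path) <= max_age_seconds
    except OSError:  # missing file or unreadable metadata counts as not fresh
        return False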
(This would occur when the user resubmits a request without reloading or reuploading)\r\n if time.time() - os.path.getmtime(txtpath) > 11:\r\n return redirect('download', tag=tag)\r\n return FileResponse(open(os.path.join(\"media\", \"tempzips\",'SS-' + tag + '.zip'), 'rb'), filename = 'SSArchive.zip')\r\n # Otherwise redirect\r\n except:\r\n return redirect('download', tag=tag)\r\n \r\n\r\n# Loader.to Downloader Tool View\r\ndef loaderdl(request):\r\n return render(request, os.path.join('split', 'loaderdl.html'))\r\n\r\n\t\r\n# About View\r\ndef about(request):\r\n return render(request, os.path.join('split', 'about.html'))\r\n\t","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483129746","text":"from GUI.GUI_plotter import Window_Plotter\nfrom PyQt5 import QtWidgets, QtCore\nimport sys, os, pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nclass Plotter(QtWidgets.QMainWindow):\n trigger=QtCore.pyqtSignal()\n def __init__(self,*args, **kwargs):\n super(Plotter, self).__init__(*args, **kwargs)\n self.window=Window_Plotter()\n self.x=np.linspace(0,10000,1000)\n def __sigmoid(self,params):\n return 1./(1+np.exp(params[0]*(self.x+params[1])))\n def init_win(self):\n self.window.setupUi(self)\n self.window.exit_button['button'].clicked.connect(self.close_window)\n self.window.folder_button['button'].clicked.connect(self.choose_folder)\n self.window.neuron_button['button'].clicked.connect(self.show_activations)\n #%%\n self.window.vision1_button['button'].clicked.connect(lambda: self.mean_series(\"vision_range\"))\n self.window.meta1_button['button'].clicked.connect(lambda: self.mean_series(\"metabolism\"))\n self.window.hunger1_button['button'].clicked.connect(lambda: self.mean_series(\"hunger_rate\"))\n self.window.avoid1_button['button'].clicked.connect(lambda: self.mean_series(\"avoid_rate\"))\n #%%\n self.window.vision2_button['button'].clicked.connect(lambda: self.histogram(\"vision_range\",[0,1000]))\n self.window.meta2_button['button'].clicked.connect(lambda: self.histogram(\"metabolism\",[0,3]))\n self.window.hunger2_button['button'].clicked.connect(lambda: self.histogram(\"hunger_rate\",[0,5]))\n self.window.avoid2_button['button'].clicked.connect(lambda: self.histogram(\"avoid_rate\",[0,5]))\n self.file=None\n self.trigger.connect(self.update_combo)\n def mean_series(self,param):\n progression=[]\n lens=[]\n for step in self.names_str:\n path=self.file+\"/params\"+step+\".sav\"\n with open(path,\"rb\") as f:\n current_agents=pickle.load(f)\n param_list=[agent[param] for agent in current_agents]\n progression.append(param_list)\n lens.append(len(param_list))\n data=np.zeros((max(lens),len(progression)))\n for i in range(data.shape[1]):\n sample=np.array(progression[i])\n if lens[i]= 4:\n dayinfoURL = aURs[0]['href']\n boxScoreURL = aURs[3]['href']\n\n thValues = key.find_all('th')\n dateGame = ''\n if len(thValues) >= 1:\n dateGame = thValues[0].text\n\n def get_team(teamvalues):\n array = np.zeros(len(teamvalues), dtype=object)\n for i, value in enumerate(teamvalues):\n array[i] = value.text.replace('\\n', '\\t')\n return array\n\n tdTeam = np.concatenate(\n (np.array([dateGame]), get_team(tdValues), np.array([dayinfoURL, boxScoreURL, isPlayoffs])))\n if tdTeam.size != len(columns):\n continue\n teamArray = tdTeam.reshape(1, len(columns))\n team = pd.DataFrame(teamArray, columns=columns)\n teams = 
teams.append(team)\n\n # print(columns[0])\n teams = teams.set_index(columns[0])\n\n fileName = '../data/season/' + index + '.' + lg + '.' + table['id']\n # print(fileName)\n teams.to_csv(fileName, encoding='utf-8', mode='a', header=isFirst)\n isFirst = False\n\n\ndef main(argv):\n \"\"\"The main method for this module.\n \"\"\"\n game_in_each_season_details(argv[0])\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])","sub_path":"crawl/game_in_each_season_details.py","file_name":"game_in_each_season_details.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"633180929","text":"import glob\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom moviepy.editor import VideoFileClip\n\nimport model\nimport features\nimport tracking\n\nplt.rcParams['figure.dpi'] = 50\nplt.rcParams['figure.figsize'] = (30,)*2\nplt.rcParams['axes.titlesize'] = 36\nplt.rcParams['xtick.labelsize'] = 24\nplt.rcParams['ytick.labelsize'] = 24\n\ndef get_random_image(glob_expr):\n file_list = glob.glob(glob_expr)\n return cv2.imread(np.random.choice(file_list))\n\ndef plot_hogs(vehicle, non_vehicle, color_spaces=(\"RGB\", \"HLS\", \"YUV\")):\n for cspace in color_spaces:\n plt.figure(figsize=(30, 20))\n for col, img, name in ((0, vehicle, \"Car\"), (2, non_vehicle, \"Non-Car\")):\n new = features.convert_color(img, cspace)\n for ch in range(len(cspace)):\n hog = features.get_hog_features(new, orient=9,\n pix_per_cell=8,\n cell_per_block=2,\n vis=True,\n channel=ch)[0][1]\n plt.subplot2grid((len(cspace), 4), (ch, col))\n plt.imshow(new[:, :, ch], cmap='gray')\n plt.title(\"%s %s[%s]\" % (name, cspace, cspace[ch]))\n\n plt.subplot2grid((len(cspace), 4), (ch, col + 1))\n plt.imshow(hog, cmap='gray')\n plt.title(\"%s HoG-%s[%s]\" % (name, cspace, cspace[ch]))\n plt.tight_layout()\n plt.show()\n\ndef plot_windows(*images, scale=1, hog_cells_per_step=(2, 8), colors=[model.blue, model.red]):\n yrange = model.yranges[model.scales.index(scale)]\n offset = np.array(((0, yrange[0]), (0, yrange[0])))\n for image in images:\n result = image\n for step, color in zip(hog_cells_per_step, colors):\n windows = features.generate_windows(image[yrange[0]:yrange[1]].shape[:2],\n hog_cells_per_step=step,\n scale=scale)\n next(windows) #discard first, it is the resampled shape\n result = tracking.draw_boxes(\n result, [(0, w[2] + offset) for w in windows],\n colors=[color], thick=2,\n )\n plt.imshow(result[..., ::-1])\n plt.show()\n\ndef plot_result(image=None, name=None, use_heatmap=False, static=True):\n if isinstance(image, str):\n path = image\n image = cv2.imread(path)[..., ::-1]\n if name is None:\n name = path\n else:\n if name is None:\n name = 'in-memory'\n\n if static:\n model.tracker.reset()\n model.tracker.__dict__.update(model.static_params)\n\n result, unmerged = model.tracker(image, return_unmerged=True)\n titles = [\"%s (original boxes)\" % name,\n \"%s (merged boxes)\" % name]\n if use_heatmap:\n result, unmerged = model.tracker.heatmap, cv2.addWeighted(result, 2/3,\n unmerged, 1/3,\n 0)\n titles = [\"%s (boxes)\" % name,\n \"%s (heatmap)\" % name]\n\n plt.subplot(121)\n plt.imshow(unmerged)\n plt.title(titles[0])\n\n plt.subplot(122)\n plt.imshow(result, cmap='hot', vmin=0, vmax=max(10, model.tracker.heat_thresholds[1]*3))\n plt.title(titles[1])\n\n plt.tight_layout()\n plt.show()\n\ndef plot_frames(path, start, n_frames=6, prerun=True, use_heatmap=False):\n clip = VideoFileClip(path)\n model.tracker.reset()\n 
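# Editor's note (inserted): the loop below first replays tracker.fir_length
# frames ending just before `start`, so the tracker's rolling heat history is
# warm before any frame is plotted; np.arange(-fir_length, 0)/fps + start
# yields exactly those earlier timestamps. A tiny numeric check of that
# expression (the fps/start/fir_length values here are assumed for
# illustration):
import numpy as np
fps, start, fir_length = 25.0, 10.0, 5
warmup = np.arange(-fir_length, 0) / fps + start
# -> array([9.8 , 9.84, 9.88, 9.92, 9.96]): the five frames just before start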
for second in np.arange(-model.tracker.fir_length, 0)/clip.fps + start:\n frame = clip.get_frame(second)\n model.tracker(frame)\n for second in np.arange(n_frames)/clip.fps + start:\n frame = clip.get_frame(second)\n plot_result(frame, \"%s@%.2fs\" % (path, second), use_heatmap, static=False)\n","sub_path":"writeup.py","file_name":"writeup.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"342878506","text":"# 在原有的django的view视图基础上在封装\n# 增加一些新功能\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom user import models\n\n# Create your views here.\n\n\n# 用户登录\nimport time, hashlib\n\n\ndef get_token(username, password):\n\n # token --> 由于当前时间,用户和密码生成,确保唯一性\n current = str(int(time.time() * 1000))\n md5_obj = hashlib.md5(current.encode('utf8'))\n md5_obj.update(username.encode('utf8'))\n md5_obj.update(password.encode('utf8'))\n\n return md5_obj.hexdigest()\n\n\n# 注册\n# class Register(APIView):\n#\n# # 取消 认证 权限\n# permission_classes = []\n# authentication_classes = []\n# throttle_classes = []\n#\n# def post(self, request, *args, **kwargs):\n#\n# ret = {\n# 'code': 1,\n# 'msg': '注册成功'\n# }\n# name = request._request.POST.get('name')\n# password = request._request.POST.get('password')\n# birthday = request._request.POST.get('birthday')\n# try:\n# obj = models.User.objects.filter(name=name).first()\n#\n# if obj:\n# # 用户user import models已存在\n# ret['code'] = 0\n# ret['msg'] = '用户已存在'\n# else:\n# # 用户不存在\n# models.User.objects.create(name=name, password=password, birthday=birthday)\n# ret['user'] = name\n#\n# except Exception as e:\n# print(e)\n# ret['code'] = 0\n# ret['msg'] = '捕获异常'\n#\n# return Response(ret)\n\n\n# 登录\nclass Login(APIView):\n\n # 取消 认证 权限\n permission_classes = []\n authentication_classes = []\n throttle_classes = []\n\n def post(self, request, *args, **kwargs):\n\n ret = {\n 'code': 1,\n 'msg': '登录成功'\n }\n name = request._request.POST.get('name')\n password = request._request.POST.get('password')\n try:\n obj = models.User.objects.filter(name=name).first()\n # 1.判断是否存在\n if obj:\n # 用户已存在\n # 2.判断用户名 密码是否正确\n if obj.password == password:\n # 登录成功\n # 3.创建登录的标识符 (token)\n # 生成用户登录的唯一标识,每次登录都获取新的token,\n token = get_token(obj.name, obj.password)\n print(token)\n # 表中如果没有则创建数据,存在则更新数据\n models.Token.objects.update_or_create(user=obj, defaults={'token': token})\n\n # 4.返回登录后的信息 (tiken字符串)\n # 将token登录标识返回给用户\n ret['token'] = token\n\n else:\n ret['code'] = 2\n ret['msg'] = '用户名或密码错误'\n\n\n else:\n # 用户不存在\n ret['code'] = 1\n ret['msg'] = '该用户不存在'\n\n except Exception as e:\n print(e)\n ret['code'] = 0\n ret['msg'] = '捕获异常'\n\n return Response(ret)\n\n\n\n\nfrom rest_framework.throttling import SimpleRateThrottle\nfrom rest_framework.versioning import QueryParameterVersioning\nfrom rest_framework.versioning import URLPathVersioning\n\n\n# 自定义版本类\n# class MyParthVersioning(object):\n# def datermint_version(self,request,*args,**kwargs):\n# # 获取用户传递的版本参数(version)\n# version = request._request.GET.get('version')\n# # version = request.query_params.get('version')\n# return version\n#\n#\n# 只有登录的用户才能查看所有的用户信息\n# class UserList(APIView):\n#\n# # 设置节流的类\n# # throttle_classes = [VisitThrottle,]\n#\n# # 取消节流\n# # throttle_classes = [SimpleRateThrottle]\n#\n# # 设置自定义版本类\n# # versioning_class = MyParthVersioning\n# # versioning_class = QueryParameterVersioning\n# versioning_class = URLPathVersioning\n#\n#\n#\n#\n# def get(self, request, *args, 
**kwargs):\n#\n# # request.version 版本号\n# # request.version_scheme => MyParthVersioning()\n# print(request.version,request.version_scheme)\n# #reverse反向生成url地址\n# url = request.version_scheme.reverse('userelist',request=request)\n#\n # ret = {\n # 'code': 1,\n # 'msg': '请求成功'\n # }\n# try:\n# queryset = models.User.objects.all()\n#\n# data = []\n# for model in queryset:\n# cbv_d = {}\n# cbv_d['name'] = model.name\n# cbv_d['type'] = model.type\n# cbv_d['birthday'] = model.birthday\n# data.append(cbv_d)\n#\n# ret['data'] = data\n#\n# # 获取用户信息\n# if type(request.user) == str:\n# user = request.user\n# else:\n# user = request.user.name\n#\n# auth = request.auth\n# ret['user'] = user\n# ret['auth'] = auth\n#\n# except Exception as e:\n# print(e)\n# ret['code'] = 0\n# ret['msg'] = '捕获异常'\n#\n# return Response(ret)\n\n\n\n\n#######################解析器######################\nfrom rest_framework.parsers import JSONParser,FormParser\n\nclass Order(APIView):\n permission_classes = []\n authentication_classes = []\n\n versioning_class = URLPathVersioning\n\n def post(self,request,*args,**kwargs):\n\n data = request.data\n print(data)\n\n return Response('订单接口')\n\n\n\n\n\n\n##############################序列化##################################\n\nfrom rest_framework import serializers\n\n########### 注册\n# class RegisterUserSerializer(serializers.Serializer):\n# # 用户名\n# name = serializers.CharField(error_messages=\n# {\n# 'required' :'用户名不能为空'\n# },\n#\n# )\n# # 密码\n# password = serializers.CharField(error_messages=\n# {\n# 'required' :'密码不能为空'\n# },\n# )\n#\n# # value -->password\n# def validate_password(self,value):\n# import re\n# from rest_framework.exceptions import ValidationError\n# rea = re.match(r'[A-Z]',value)\n# reb = re.search(r'[a-b]',value)\n# rec = re.search(r'[0-9]',value)\n# red = len(value)\n#\n# if rea:\n# # 判断首字母大写\n# if reb and rec and red > 7:\n# return value\n# else:\n# raise ValidationError('密码格式错误')\n# else:\n# raise ValidationError('首字母必须大写')\n#\n#\n# # 创建 更新 需要自己写方法\n# def create(self, validated_data):\n# instance = models.User.objects.create(**validated_data)\n# return instance\n\nclass RegisterUserSerializer(serializers.ModelSerializer):\n # 用户名\n name = serializers.CharField(error_messages=\n {\n 'required' :'用户名不能为空'\n },\n\n )\n # 密码\n password = serializers.CharField(error_messages=\n {\n 'required' :'密码不能为空'\n },\n )\n\n # value -->password\n def validate_password(self,value):\n import re\n from rest_framework.exceptions import ValidationError\n rea = re.match(r'[A-Z]',value)\n reb = re.search(r'[a-b]',value)\n rec = re.search(r'[0-9]',value)\n red = len(value)\n\n if rea:\n # 判断首字母大写\n if reb and rec and red > 7:\n return value\n else:\n raise ValidationError('密码格式错误')\n else:\n raise ValidationError('首字母必须大写')\n\n class Meta:\n pass\n # model = models.User\n\n\n\nclass Register(APIView):\n\n def post(self, request, *args, **kwargs):\n\n ret = {\n 'code': 1,\n 'msg': '注册成功'\n }\n\n # .data使用了解析器\n data = request.data\n # 进行序列化\n ser = RegisterUserSerializer(data=data)\n if ser.is_valid(): # 进行序列化验证\n print(ser.validated_data)\n # 保存验证后的数据(保存到数据库)\n ser.save()\n else:\n # 未通过验证\n print(ser.errors)\n ret['code'] = 0\n ret['msg'] = '注册失败'\n\n return Response(ret)\n\n\n\n\n########### 用户列表\nclass UserSerializer(serializers.Serializer):\n # id\n # id = serializers.IntegerField()\n url = serializers.HyperlinkedIdentityField(\n view_name='userdetail',lookup_field='id',\n lookup_url_kwarg='pk'\n )\n # 用户名\n name = serializers.CharField()\n # 用户类型 (普通类型,VIP类型)\n # type = 
serializers.IntegerField()\n type = serializers.CharField(source='get_type_display')\n\n\n\n########### 分页\n# DRF 提供了三种方式\nfrom rest_framework.pagination import PageNumberPagination\n# https://www.baidu.com/?kw=xxx&page=1&pagessize=10\n# 自定义分类\nclass MpPageNumberPagination(PageNumberPagination):\n # page_size 每页返回多少条数\n page_size = 5\n # 传递分页的参数\n page_query_param = 'page'\n # 传递每页返回多少数据的参数\n page_size_query_param = 'pagesize'\n # 每页返回数据的最大条数\n max_page_size = 10\n\n def get_paginated_response(self, data):\n ret = {\n 'code':1,\n 'count':self.page.paginator.count,\n 'previous':self.get_previous_link(),\n 'results': data\n }\n return Response(ret)\n\n\n# https://www.baidu.com/?kw=xxx&page=1&pagessize=10\n\n\n\nfrom django.forms import model_to_dict\nfrom util.authentication import MyBaseAuthentication\n\nclass UserList(APIView):\n\n # authentication_classes =[MyBaseAuthentication]\n\n def get(self,request,*args,**kwargs):\n\n ret = {\n 'code': 1,\n 'data': None\n }\n\n # 获取用户列表\n queryset = models.User.objects.all()\n\n # 实例化分页类\n pg = MpPageNumberPagination()\n pg_data = pg.paginate_queryset(queryset=queryset,request=request,view=self)\n\n # 实例化一个对象 把取出来的model序列化\n ser = UserSerializer(\n instance=pg_data,\n context={'request',request},\n many=True)\n\n # ret['data'] = ser.data\n # return Response(ret)\n return pg.get_paginated_response(ser.data)\n\n # queryset = models.User.objects.all('id','user','type')\n # print(type(queryset))\n # data = list(queryset)\n # print(data)\n\n\n\n\n########## 用户详情\nclass UserDetailSerializer(serializers.Serializer):\n # id\n id = serializers.IntegerField()\n # 用户名\n name = serializers.CharField()\n # 用户类型 (普通类型,VIP类型)\n # type = serializers.IntegerField()\n type = serializers.CharField(source='get_type_display')\n # 密码\n password = serializers.CharField()\n # 性别\n # gender = serializers.IntegerField()\n gender = serializers.CharField(source='get_gender_display')\n # 出生日期\n birthday = serializers.DateTimeField(format='%Y-%m-%d')\n\n\n\nclass UserDetailView(APIView):\n\n def get(self,request,*args,**kwargs):\n\n ret = {\n 'code': 1,\n 'data': None\n }\n\n # id = int(request._request.GET.get('id'))\n id = kwargs.get('pk')\n quertset = models.User.objects.filter(id=id).first()\n\n try:\n if quertset:\n # 用户存在\n # 序列化\n ser = UserDetailSerializer(instance=quertset,many=False)\n ret['data'] = ser.data\n else:\n ret['code'] = 0\n ret['msg'] = '用户不存在'\n\n except Exception as e:\n print(e)\n ret['code'] = 0\n ret['msg'] = '用户不存在'\n\n return Response(ret)\n\n\n\n#***************************** 视图 *****************************\n\nfrom rest_framework.generics import GenericAPIView\n\n# class UserGenericAPIView(GenericAPIView):\n# # 获取用户的列表\n# # 获取数据库中表里面的数据集\n# queryset = models.User.objects.all()\n# # 设置序列化的类\n# serializer_class = UserSerializer\n# # 设置分页\n# pagination_class = MpPageNumberPagination\n#\n# def get(self,request,*args,**kwargs):\n# data = self.get_queryset()\n# # 获取当前分页下的数据\n# pd_data = self.paginate_queryset(queryset=data)\n# # 调用get_serializer方法获取UserSerializer对象\n# ser = self.get_serializer(\n# instance = pd_data,many=True,\n# context={'request':request},\n# )\n#\n# # return Response(ser.data)\n# return self.get_paginated_response(ser.data)\n\nfrom rest_framework.viewsets import GenericViewSet\n\nclass UserGenericAPIView(GenericViewSet):\n # 获取用户的列表\n # 获取数据库中表里面的数据集\n queryset = models.User.objects.all()\n # 设置序列化的类\n serializer_class = UserSerializer\n # 设置分页\n pagination_class = MpPageNumberPagination\n\n def list(self, request, *args, **kwargs):\n 
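# Editor's note (inserted): list() below pulls the queryset, slices it with
# the configured pagination class, serializes the page, and returns the
# paginated envelope. A GenericViewSet only becomes routable through a router;
# a minimal urls.py wiring sketch (module path and route prefix are assumed,
# not taken from this project):
#
#   from rest_framework.routers import DefaultRouter
#   from user.views import UserGenericAPIView
#
#   router = DefaultRouter()
#   router.register(r'users', UserGenericAPIView, basename='user')
#   urlpatterns = router.urls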
data = self.get_queryset()\n # 获取当前分页下的数据\n pd_data = self.paginate_queryset(queryset=data)\n # 调用get_serializer方法获取UserSerializer对象\n ser = self.get_serializer(\n instance=pd_data, many=True,\n context={'request': request},\n )\n\n # return Response(ser.data)\n return self.get_paginated_response(ser.data) # 有上一页和下一页\n\n\n\n\n##### 最终的视图\nfrom rest_framework.mixins import ListModelMixin,\\\n CreateModelMixin,DestroyModelMixin,UpdateModelMixin,\\\n RetrieveModelMixin\n\n# CreateModelMixin: 新增数据 POST请求\n# DestroyModelMixin: 删除数据 DELETE请求\n# UpdateModelMixin: 更新数据 PUT请求\n# ListModelMixin: 获取列表数据 GET请求\n# RetrieveModelMixin: 获取详情数据 GET请求\n# 使用以上视图的时候一定要和GenericViewSet配合使用\n\nclass UserListSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.User\n fields = \"__all__\"\n\n# 渲染器模板\nfrom rest_framework.renderers import JSONRenderer,\\\n BrowsableAPIRenderer,AdminRenderer\n\n\n# 分别对应数据的增删该查\nclass UserListView(ListModelMixin,CreateModelMixin,DestroyModelMixin,UpdateModelMixin,RetrieveModelMixin,GenericViewSet):\n # 设置渲染器模板\n renderer_classes = [JSONRenderer,AdminRenderer]\n\n # 设置数据集\n queryset = models.User.objects.all()\n # 设置序列化类\n serializer_class = UserListSerializer\n # 设置分页类\n pagination_class = MpPageNumberPagination\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"前后端分离-vue-DRF/houfen_DRF-projects/15day/zhuce_denglu/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"346902617","text":"\"\"\"\nModule to manipulate ec2 resources\n\"\"\"\nimport datetime\nimport boto3\nimport secret\n\nACCESS_KEY_ID, SECRET_ACCESS_KEY = secret.getAccess()\nREGION = secret.getRegion()\n\nEC2R = boto3.resource(service_name='ec2', aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=SECRET_ACCESS_KEY, region_name=REGION)\nEC2C = boto3.client(service_name='ec2', aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=SECRET_ACCESS_KEY, region_name=REGION)\nELBC = boto3.client(service_name='elb', aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=SECRET_ACCESS_KEY, region_name=REGION)\n\nSUPPORTC = boto3.client(service_name='support', aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=SECRET_ACCESS_KEY, region_name=\"us-east-1\")\n\nDRY = True\n\n#EC2 Volumes\ndef getOldUnusedVols(verbose):\n \"\"\"Get List of volumes that are available and 30 days old at least\"\"\"\n res = []\n ec2volumes = EC2C.describe_volumes(Filters=[\n {\n 'Name': 'status',\n 'Values': [\n 'available',\n ],\n }]).get('Volumes', [])\n\n today = datetime.datetime.now(datetime.timezone.utc)\n days30 = today-datetime.timedelta(days=30)\n for vol in ec2volumes:\n if not 'Tags' in vol:\n if vol['CreateTime'] < days30:\n if verbose:\n res.append(vol['VolumeId']+\";\"+str(vol['CreateTime']))\n else:\n res.append(vol['VolumeId'])\n return res\n\ndef cleanupOldUnusedVols(verbose):\n \"\"\"Delete old unused volumes\"\"\"\n lvol = getOldUnusedVols(False)\n for vol in lvol:\n resp = EC2C.delete_volume(\n DryRun = DRY,\n VolumeId = vol\n )\n if verbose:\n print(\"Volume with id: \"+vol+\" deleted\")\n print(\"Delete \"+str(len(lvol))+\" volumes\")\n\n#EC2 Instances\ndef getInstance(verbose,instanceId):\n \"\"\"Simple function to get informations for an instance\"\"\"\n dinstance = EC2C.describe_instances(InstanceIds=[instanceId])\n return dinstance\n\ndef getUserInstances(verbose,user):\n \"\"\"Count number of instances for specific user\"\"\"\n nb = 0\n instances = 
EC2R.instances.filter(Filters=[{'Name':'tag:Owner', 'Values':[user]}])\n for instance in instances:\n nb += 1\n if verbose:\n server = str(instance.id)+\";\"+str(instance.instance_type)+\";\"+\\\n str(instance.state['Name'])+\";\"+str(instance.private_ip_address)+\";\"\n try:\n for tag in instance.tags:\n if tag['Key'] == 'Description':\n server += tag['Value']+\";\"\n if tag['Key'] == 'Owner':\n server += tag['Value']+\";\"\n if tag['Key'] == 'ManagedBy':\n server += tag['Value']+\";\"\n except:\n continue\n else:\n server = str(instance.id)+\";\"+str(instance.instance_type)+\";\"+\\\n str(instance.state['Name'])\n print(server)\n print(\"Found \"+str(nb)+\" instances\")\n\ndef listInstances(verbose):\n \"\"\"list all ec2 instances\"\"\"\n nb = 0\n for instance in EC2R.instances.all():\n if verbose:\n server = str(instance.id)+\":\"+str(instance.instance_type)+\",\"+\\\n str(instance.state['Name'])+\";\"+str(instance.private_ip_address)+\";\"\n nb += 1\n try:\n for tag in instance.tags:\n if tag['Key'] == 'Description':\n server += tag['Value']+\":\"\n if tag['Key'] == 'Owner':\n server += tag['Value']+\":\"\n if tag['Key'] == 'ManagedBy':\n server += tag['Value']+\":\"\n except:\n continue\n else:\n nb += 1\n server = str(instance.id)+\":\"+str(instance.instance_type)+\",\"+\\\n str(instance.state['Name'])\n print(server)\n print(\"Found \"+str(nb)+\" instances\")\n\ndef countInstanceByType(verbose):\n \"\"\"Count instances by flavors\"\"\"\n instancesByType = {}\n for instance in EC2R.instances.all():\n try:\n instancesByType[instance.instance_type] += 1\n except:\n instancesByType[instance.instance_type] = 1\n for k, v in instancesByType.items():\n print(k+\":\"+str(v))\n\ndef startInstance(instanceID):\n \"\"\"Simple method to start an instance\"\"\"\n response = client.start_instances(\n DryRun=DRY,\n InstanceIds=[\n instanceID,\n ],\n )\n\ndef stopInstance(instanceID):\n \"\"\"Simple method to stop an instance\"\"\"\n response = client.stop_instances(\n DryRun=DRY,\n InstanceIds=[\n instanceID,\n ],\n Force=True\n )\n\n#ELB\ndef listElb(verbose):\n \"\"\"List all ELB\"\"\"\n res = []\n delb = ELBC.describe_load_balancers()\n for elb in delb['LoadBalancerDescriptions']:\n if verbose:\n instances = \"\"\n for instance in elb['Instances']:\n instances += \",\"+instance['InstanceId']\n instances = instances[1:]\n res.append(elb['LoadBalancerName']+\";\"+','.join(elb['Subnets'])+\\\n \";\"+','.join(elb['AvailabilityZones'])+\";\"+instances)\n else:\n res.append(elb['LoadBalancerName']+\";\"+','.join(elb['Subnets']))\n return res\n\ndef getElbInstance(verbose,elbName):\n \"\"\"Return list of instances behind an elb\"\"\"\n linstances = []\n delb = ELBC.describe_load_balancers(\n LoadBalancerNames = [elbName]\n )\n linstances = delb['LoadBalancerDescriptions'][0]['Instances']\n return linstances\n\ndef getIdleELB(verbose):\n \"\"\"Get list of Idle ELB\"\"\"\n lIdleElb = []\n totalSavings = 0\n jResp = SUPPORTC.describe_trusted_advisor_checks(language=\"en\")\n for it in jResp['checks']:\n if it['category'] == 'cost_optimizing' and it['name'] == 'Idle Load Balancers':\n jResp2 = SUPPORTC.describe_trusted_advisor_check_result(checkId=str(it['id']),\n language=\"en\")\n for elb in jResp2['result']['flaggedResources']:\n if 'No active back-end instances' in elb['metadata']:\n linstances = ec2.getElbInstance(False,elb['metadata'][1])\n if len(linstances) == 0:#if no instances\n lIdleElb.append(elb['metadata'][1])\n totalSavings += float(elb['metadata'][3][1:])\n for instance in 
linstances:#search if instance still exist\n haveInstance = True\n try:\n dinstance = ec2.getInstance(False,instance['InstanceId'])\n haveInstance = True\n except Exception as e:\n if re.search('InvalidInstanceID.NotFound', str(e)):\n haveInstance = False\n if not haveInstance:\n lIdleElb.append(elb['metadata'][1])\n totalSavings += float(elb['metadata'][3][1:])\n print(\"You can save up to \"+str(totalSavings)+\"$\")\n return lIdleElb\n\ndef deleteELB(verbose,elbName):\n \"\"\"Delete a RDS instance\"\"\"\n ELBC.delete_load_balancer(LoadBalancerName=elbName)\n if verbose:\n print(\"ELB with name: \"+str(elbName)+\" deleted\")\n\ndef cleanupELB(verbose):\n \"\"\"Delete all Idle ELB\"\"\"\n lelb = getIdleELB(False)\n for elb in lelb:\n deleteELB(verbose,elb)\n","sub_path":"AWS/ec2.py","file_name":"ec2.py","file_ext":"py","file_size_in_byte":7573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"551337003","text":"import Basic_WS.list.RandomCharacter as RC\n\n# 문자 리스트를 생성한다.\ndef createList():\n chars = [] # 빈리스트 생성\n\n # 소문자를 생성하고 리스트에 추가한다.\n for i in range(100):\n chars.append(RC.getRandomLowerCaseLetter())\n\n return chars\n\n# 문자 리스트를 출력한다.\ndef displayList(chars):\n for i in range(len(chars)):\n if(i + 1) % 20 == 0:\n print(chars[i])\n else:\n print(chars[i], end=' ')\n\n# 각 문자의 빈도수를 센다\ndef cntLetters(chars):\n # 0으로 초기화된 26개의 정수 리스트를 생성한다.\n counts = 26 * [0]\n\n # 각 리스트의 소문자를 센다\n for i in range(len(chars)):\n counts[ord(chars[i]) - ord('a')] += 1\n\n return counts\n\n# 각 문자의 빈도수를 출력한다\ndef displayCounts(counts):\n for i in range(len(counts)):\n if(i+1) % 10 == 0:\n print(counts[i], chr(i + ord('a')))\n else:\n print(counts[i], chr(i + ord('a')), end=' ')\n\ndef main():\n list = createList()\n displayList(list)\n print()\n counts = cntLetters(list)\n displayCounts(counts)\n\nmain()","sub_path":"Basic_WS/list/CountLettersInList.py","file_name":"CountLettersInList.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"74525300","text":"# -*- coding: utf-8 -*-\nfrom odoo import fields, models, api\n\n\nclass Partner(models.Model):\n _inherit = 'res.partner'\n\n @api.model\n def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):\n \"\"\"跨公司调拨选择业务伙伴时,只选择公司关联的res.partner\"\"\"\n if 'across_move_partner' in self._context:\n company_id = self._context.get('company_id', self.env.user.company_id.id)\n args = args or []\n\n ids = self.env['res.company'].sudo().search([('id', '!=', company_id)]).mapped('partner_id').ids\n args.append(('id', 'in', ids))\n\n return super(Partner, self)._search(args, offset=offset, limit=limit, order=order, count=count, access_rights_uid=access_rights_uid)\n\n\n\n","sub_path":"myaddons/cj_stock/models/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"503780798","text":"import os\n\nimport requests as rq\nfrom babel import Locale\nfrom basic_task.tasks import send_new_hook_notification\nfrom figure_hook.Models import Webhook\nfrom flask import Blueprint, flash, redirect, request\nfrom flask.globals import session\nfrom flask.helpers import url_for\nfrom flask_babel import force_locale, gettext\n\nblueprint = Blueprint(\"auth\", __name__)\n\nAPI_ENDPOINT = \"https://discord.com/api/v8\"\n\n\n@blueprint.route(\"/webhook\", methods=[\"GET\"])\ndef 
webhook():\n args = request.args.to_dict()\n\n if \"error\" in args:\n return redirect(url_for(\"public.home\"))\n\n r = exchange_token(args[\"code\"])\n state = args[\"state\"]\n\n if r.status_code == 200 and check_state(state):\n webhook_response = r.json()\n webhook_channel_id = webhook_response['webhook']['channel_id']\n webhook_id = webhook_response['webhook'][\"id\"]\n webhook_token = webhook_response['webhook']['token']\n webhook_setting = session['webhook_setting']\n save_webhook_info(webhook_channel_id, webhook_id, webhook_token, **webhook_setting)\n\n with force_locale(Locale.parse(webhook_setting['lang'], sep='-')):\n msg = gettext(\"FigureHook hooked on this channel.\")\n\n send_hook_noti(webhook_id, webhook_token, msg)\n flash(gettext(\"Hooking success!\"), 'success')\n\n elif r.status_code >= 400:\n error = r.json()\n if 'code' in error:\n if error['code'] == 30007:\n flash(error['message'], 'danger')\n flash(gettext(\"Webhook authorization failed.\"), 'warning')\n\n return redirect(session['entry_uri'])\n\n\ndef send_hook_noti(webhook_id, webhook_token, msg):\n send_new_hook_notification.apply_async(kwargs={\n 'webhook_id': webhook_id,\n 'webhook_token': webhook_token,\n 'msg': msg\n })\n\n\ndef exchange_token(code):\n data = {\n \"client_id\": os.getenv(\"DISCORD_CLIENT_ID\"),\n \"client_secret\": os.getenv(\"DISCORD_CLIENT_SECRET\"),\n \"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": url_for(\"auth.webhook\", _external=True, _scheme='https')\n }\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n r = rq.post(f\"{API_ENDPOINT}/oauth2/token\", data=data, headers=headers)\n return r\n\n\ndef save_webhook_info(channel_id, _id, token, **kwargs):\n the_hook = Webhook.get_by_channel_id(channel_id)\n if the_hook:\n the_hook.update(id=_id, token=token, **kwargs)\n if not the_hook:\n the_hook = Webhook.create(channel_id=channel_id, id=_id, token=token, **kwargs)\n the_hook.session.commit()\n\n\ndef check_state(state):\n return session.get('state') == state\n","sub_path":"Services/web/controllers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"1712938","text":"# coding:utf-8\n\nn = int(input())\na = list(map(int, input().split()))\n\nret = 0\nfor i in range(n):\n ret += 1\n x = 0\n while (2**(x+1))<=a[i]:\n x += 1\n for y in range(i+1, n, 1):\n if a[y]&(2**x):\n a[y] ^= a[i]\n\nprint(2**ret)\n","sub_path":"yukicoder/183.py","file_name":"183.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"645356769","text":"import math\nimport collections\nimport heapq\nimport bisect\nimport functools\n\nm, n, q = map(int, input().strip().split(\" \"))\na = []\nfor i in range(m):\n a.append([int(c) for c in input()])\np = [[0 for j in range(n + 1)] for i in range(m + 1)]\nfor i in range(m):\n for j in range(n):\n p[i + 1][j + 1] = p[i + 1][j] + p[i][j + 1] - p[i][j] + a[i][j]\ni1, j1, i2, j2 = 0, 0, m - 1, n - 1\nmi1, mj1, mi2, mj2 = 0, 0, m - 1, n - 1\ndx, dy = 0, 0\nstop = False\nfor _ in range(q):\n arr = list(map(int, input().split()))\n if len(arr) == 1:\n if stop:\n print(0)\n else:\n print(max(p[mi2 + 1][mj2 + 1] - p[mi1][mj2 + 1] - p[mi2 + 1][mj1] + p[mi1][mj1], 0))\n elif not stop:\n x, y = arr[2], arr[1]\n i1 += x\n i2 += x\n j1 += y\n j2 += y\n mi1 = max(mi1, i1, 0)\n mi2 = min(mi2, i2, m - 1)\n mj1 = max(mj1, 
j1, 0)\n mj2 = min(mj2, j2, n - 1)\n if i1 == m or i2 == -1 or j1 == n or j2 == -1 or mi1 > mi2 or mj1 > mj2:\n stop = True\n","sub_path":"hackerearth/2022/basic_implementation/mind-flayer-returns.py","file_name":"mind-flayer-returns.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"426427082","text":"import torch\nimport torch.autograd as autograd\nimport torch.nn as nn\n\n\nclass PolicyNetwork(nn.Module):\n \"\"\"Policy Network.\"\"\"\n \n def __init__(self, state_size, action_size, seed):\n \"\"\"Initialize parameters and build model.\"\"\"\n \n super(PolicyNetwork, self).__init__()\n \n self.seed = torch.manual_seed(seed)\n \n self.state_size = state_size\n self.action_size = action_size\n \n self.affine1_layer = nn.Linear(self.state_size, 64)\n self.affine2_layer = nn.Linear(64, 64)\n \n self.action_mean = nn.Linear(64, self.action_size)\n self.action_mean.weight.data.mul_(0.1)\n self.action_mean.bias.data.mul_(0.0)\n \n self.action_log_std = nn.Parameter(torch.zeros(1, self.action_size))\n \n self.saved_actions = []\n self.rewards = []\n self.final_value = 0\n \n def forward(self, state):\n \n x = torch.tanh(self.affine1_layer(state))\n x = torch.tanh(self.affine2_layer(x))\n \n action_mean = self.action_mean(x)\n action_log_std = self.action_log_std.expand_as(action_mean)\n action_std = torch.exp(action_log_std)\n \n return action_mean, action_log_std, action_std\n \nclass ValueNetwork(nn.Module):\n \"\"\"Value Network.\"\"\"\n \n def __init__(self, state_size, seed):\n \"\"\"Initialize parameters and build model.\"\"\"\n \n super(ValueNetwork, self).__init__()\n \n self.seed = torch.manual_seed(seed)\n self.state_size = state_size\n \n self.affine1_layer = nn.Linear(self.state_size, 64)\n self.affine2_layer = nn.Linear(64, 64)\n \n self.value_head = nn.Linear(64, 1)\n self.value_head.weight.data.mul_(0.1)\n self.value_head.bias.data.mul_(0.0)\n \n def forward(self, x):\n \n x = torch.tanh(self.affine1_layer(x))\n x = torch.tanh(self.affine2_layer(x))\n \n Qsa = self.value_head(x)\n \n return Qsa\n ","sub_path":"02-deep-learning/03-reinforcement-learning/03-policy-gradient-learning/02-trpo/bipedal-walker-hardcore/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"202028821","text":"# -*- coding:utf-8 -*-\n\nimport requests\nimport datetime\nimport json\nimport re\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util import Retry\n\n\nclass EastFund():\n \"\"\" 从东方基金获取基金价格\n\n Attributes:\n fid 为基金编码,nav为基金净值,nav2为累计净值。\n \"\"\"\n\n def __init__(self, fid):\n self.fid = str(fid)\n self.s = requests.Session()\n self.s.mount('https://', HTTPAdapter(max_retries=Retry(total=5, method_whitelist=frozenset(['GET', 'POST']))))\n self.s.mount('http://', HTTPAdapter(max_retries=Retry(total=5, method_whitelist=frozenset(['GET', 'POST']))))\n self.price_list = {}\n self.revert_list = {}\n self.record_path = './record.' + str(fid)\n self.buylog_path = './buylog.' + str(fid)\n self.revert_path = './revert.' 
+ str(fid)\n\n def parse_jsonp(self, response):\n return json.loads(\n re.match(\n r'[^(]*[(]({.*})[)][^)]*',\n response.content.decode('utf-8'),\n re.S).group(1))\n\n def get_fundprice(self, start_date=None, end_date=None):\n \"\"\" 获取指定基金的净值,可以获取当前净值和累计净值 \"\"\"\n sdate = '' if start_date is None else start_date.strftime('%Y-%m-%d')\n edate = '' if end_date is None else end_date.strftime('%Y-%m-%d')\n fid = self.fid\n result = []\n url = 'http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery&pageIndex=1&'\n url += 'pageSize=20&startDate={}&endDate={}&fundCode={}'.format(sdate, edate, fid)\n header = {}\n header['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n header['User-Agent'] += 'AppleWebKit/537.36 (KHTML, like Gecko) '\n header['User-Agent'] += 'Chrome/79.0.3945.130 Safari/537.36'\n header['Referer'] = 'http://fundf10.eastmoney.com/jjjz_' + fid + '.html'\n res = self.s.get(url=url, headers=header, timeout=3)\n total_number = self.parse_jsonp(res)['TotalCount']\n if total_number > 20:\n url = 'http://api.fund.eastmoney.com/f10/lsjz?callback=jQuery&pageIndex=1&'\n url += 'pageSize={}&startDate={}&endDate={}&fundCode={}'.format(str(total_number), sdate, edate, fid)\n res = self.s.get(url=url, headers=header, timeout=3)\n finfo = self.parse_jsonp(res)['Data']['LSJZList']\n for f in finfo:\n try:\n result.append((fid, f['FSRQ'], float(f['DWJZ']), float(f['LJJZ'])))\n except Exception as e:\n print(e)\n return result\n\n def get_revert(self, end_date=None, cur_price=-1, days=360):\n \"\"\" 获取指定基金的历史回撤 \"\"\"\n if end_date is None:\n # 时间点为当前日期,如果有估值则使用估值,否则重新估值。\n if cur_price < 0:\n price = self.get_gz()\n else:\n price = (0, cur_price)\n n = datetime.datetime.now()\n end_date = datetime.datetime(n.year, n.month, n.day, 0, 0, 0)\n else:\n price = self.price_list.get(end_date, (0, 0))\n if price[1] == 0:\n return 0\n p_list = []\n for i in range(1, days):\n d = end_date - datetime.timedelta(days=i)\n p_list.append(self.price_list.get(d, (0, 0))[1])\n if max(p_list) <= price[1]:\n return 0\n else:\n return round((max(p_list) - price[1]) / max(p_list) * 100, 4)\n\n def save_fundprice(self, fprice):\n \"\"\" 保存基金的净值,可以获取当前净值和累计净值 \"\"\"\n with open(self.record_path, 'w') as fw:\n for d in sorted(fprice.keys()):\n line = '{},{},{},{}'.format(\n self.fid,\n d.strftime('%Y-%m-%d'),\n fprice[d][0],\n fprice[d][1],\n )\n fw.write(line)\n fw.write('\\n')\n\n def save_revert(self, fprice):\n \"\"\" 保存基金的回撤 \"\"\"\n with open(self.revert_path, 'w') as fw:\n for d in sorted(fprice.keys()):\n line = '{},{},{}'.format(\n self.fid,\n d.strftime('%Y-%m-%d'),\n fprice[d],\n )\n fw.write(line)\n fw.write('\\n')\n\n def load_fundprice(self, end_date=None):\n \"\"\" 加载基金净值,最多加载到前一天 \"\"\"\n result = {}\n if end_date is None:\n n = datetime.datetime.now() - datetime.timedelta(days=1)\n end_date = datetime.datetime(n.year, n.month, n.day, 0, 0, 0)\n max_dt = datetime.datetime(1970, 1, 1)\n try:\n fr = open(self.record_path, 'r')\n for line in fr.readlines():\n arr = line.strip().split(',')\n d = datetime.datetime.strptime(arr[1], '%Y-%m-%d')\n max_dt = max(d, max_dt)\n result[d] = (float(arr[2]), float(arr[3]))\n fr.close()\n if end_date <= max_dt:\n print('No need fetch new fundprice')\n self.price_list = result\n return result\n else:\n print('Need fetch new fundprice')\n fprice = self.get_fundprice(max_dt, end_date)\n for arr in fprice:\n d = datetime.datetime.strptime(arr[1], '%Y-%m-%d')\n result[d] = (float(arr[2]), float(arr[3]))\n self.save_fundprice(result)\n self.price_list = result\n 
return result\n except Exception:\n print('First fetch fundprice')\n fprice = self.get_fundprice()\n for arr in fprice:\n d = datetime.datetime.strptime(arr[1], '%Y-%m-%d')\n result[d] = (float(arr[2]), float(arr[3]))\n self.save_fundprice(result)\n self.price_list = result\n return result\n\n def load_recoup(self, end_date=None):\n \"\"\" 计算基金的最长回本时间 \"\"\"\n result = {}\n if end_date is None:\n n = datetime.datetime.now() - datetime.timedelta(days=1)\n end_date = datetime.datetime(n.year, n.month, n.day, 0, 0, 0)\n max_dt = max(self.price_list.keys())\n min_dt = min(self.price_list.keys())\n for i in range(0, (end_date - min_dt).days):\n dt = end_date - datetime.timedelta(days=i)\n if dt not in self.price_list.keys():\n continue\n result[dt] = {}\n for j in range(0, (max_dt - dt).days):\n d = max_dt - datetime.timedelta(days=j)\n if d in self.price_list and self.price_list[d][1] > 0 and self.price_list[d][1] <= self.price_list[dt][1]:\n result[dt]['e_date'] = d\n result[dt]['length'] = (d - dt).days\n break\n return result\n\n def load_revert(self, end_date=None, days=360):\n result = {}\n if end_date is None:\n n = datetime.datetime.now() - datetime.timedelta(days=1)\n end_date = datetime.datetime(n.year, n.month, n.day, 0, 0, 0)\n max_dt = datetime.datetime(1970, 1, 1)\n try:\n fr = open(self.revert_path, 'r')\n for line in fr.readlines():\n arr = line.strip().split(',')\n d = datetime.datetime.strptime(arr[1], '%Y-%m-%d')\n max_dt = max(d, max_dt)\n result[d] = float(arr[2])\n fr.close()\n if end_date <= max_dt:\n print('No need fetch new record')\n self.revert_list = result\n return result\n else:\n print('Need fetch new record')\n for i in range(0, (end_date - max_dt).days):\n d = max_dt + datetime.timedelta(days=i)\n result[d] = self.get_revert(d, days)\n self.save_revert(result)\n self.revert_list = result\n return result\n except Exception:\n print('First fetch revert')\n for i in range(0, 3600):\n d = end_date - datetime.timedelta(days=i)\n result[d] = self.get_revert(d, days)\n self.save_revert(result)\n self.revert_list = result\n return result\n\n def get_delta_price(self, end_date=None):\n price = self.load_fundprice(end_date)\n max_dt = max(price.keys())\n # 基金分红\n delta_price = price[max_dt][1] - price[max_dt][0]\n flag = True\n # 基金分拆\n if self.fid in ('160218', '161725', '162412', '163406'):\n delta_price = price[max_dt][1] / price[max_dt][0]\n flag = False\n return (delta_price, flag)\n\n def get_gz(self):\n \"\"\" 获取当前时间的估值 \"\"\"\n fid = self.fid\n url = 'http://fundgz.1234567.com.cn/js/' + fid + '.js'\n header = {}\n header['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'\n header['User-Agent'] += ' AppleWebKit/537.36 (KHTML, like Gecko)'\n header['User-Agent'] += ' Chrome/79.0.3945.130 Safari/537.36'\n (delta_price, flag) = self.get_delta_price()\n try:\n res = self.s.get(url=url, headers=header, timeout=3)\n gz_dict = self.parse_jsonp(res)\n dnow = datetime.datetime.now().strftime('%Y-%m-%d')\n if dnow != gz_dict['gztime'].split(' ')[0]:\n return (0, 0)\n if flag is True:\n return (float(gz_dict['gsz']), round(float(gz_dict['gsz']) + delta_price, 4))\n else:\n return (float(gz_dict['gsz']), round(float(gz_dict['gsz']) * delta_price, 4))\n except Exception as e:\n print(e)\n return (0, 0)\n\n def get_avg_price(self, end_date, day=365):\n \"\"\" 获取1年的均值。\n \"\"\"\n total = [0, 0]\n prices = [\n self.price_list[end_date-datetime.timedelta(days=i)]\n for i in range(1, day)\n if end_date-datetime.timedelta(days=i) in self.price_list\n ]\n if prices == []:\n 
return (0, 0)\n for i in prices:\n total[0] = total[0] + i[0]\n total[1] = total[1] + i[1]\n return (total[0]/len(prices), total[1]/len(prices))\n\n\nif __name__ == '__main__':\n index_code = '000215'\n # index_code = '519062'\n ef = EastFund(index_code)\n ef.load_fundprice()\n ef.load_revert()\n recoup = ef.load_recoup(None)\n for i in recoup:\n print(i.strftime('%Y%m%d-'), recoup[i])\n","sub_path":"eastfund.py","file_name":"eastfund.py","file_ext":"py","file_size_in_byte":10672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"110453368","text":"\"\"\"\nlvvit.py (07-06-21)\nhttps://github.com/zihangJiang/TokenLabeling/blob/4a734ea96a3b36daebf1446c1cb2e0010a8e5f48/models/lvvit.py\n\nlayers.py (07-06-21)\nhttps://github.com/zihangJiang/TokenLabeling/blob/main/models/layers.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom functools import partial\nimport torch.nn.init as init\nimport torch.nn.functional as F\nimport math\nfrom timm.models.layers import DropPath, to_2tuple\n\nDROPOUT_FLOPS = 4\nLAYER_NORM_FLOPS = 5\nACTIVATION_FLOPS = 8\nSOFTMAX_FLOPS = 5\n\n\nclass GroupLinear(nn.Module):\n \"\"\"\n Group Linear operator\n \"\"\"\n\n def __init__(self, in_planes, out_channels, groups=1, bias=True):\n super(GroupLinear, self).__init__()\n assert in_planes % groups == 0\n assert out_channels % groups == 0\n self.in_dim = in_planes\n self.out_dim = out_channels\n self.groups = groups\n self.bias = bias\n self.group_in_dim = int(self.in_dim / self.groups)\n self.group_out_dim = int(self.out_dim / self.groups)\n\n self.group_weight = nn.Parameter(\n torch.zeros(self.groups, self.group_in_dim, self.group_out_dim)\n )\n self.group_bias = nn.Parameter(torch.zeros(self.out_dim))\n\n def forward(self, x):\n t, b, d = x.size()\n x = x.view(t, b, self.groups, int(d / self.groups))\n out = (\n torch.einsum(\"tbgd,gdf->tbgf\", (x, self.group_weight)).reshape(\n t, b, self.out_dim\n )\n + self.group_bias\n )\n return out\n\n def extra_repr(self):\n s = \"{in_dim}, {out_dim}\"\n if self.groups != 1:\n s += \", groups={groups}\"\n if self.bias is None:\n s += \", bias=False\"\n return s.format(**self.__dict__)\n\n\nclass Mlp(nn.Module):\n \"\"\"\n MLP with support to use group linear operator\n \"\"\"\n\n def __init__(\n self,\n in_features,\n hidden_features=None,\n out_features=None,\n act_layer=nn.GELU,\n drop=0.0,\n group=1,\n ):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n if group == 1:\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.fc2 = nn.Linear(hidden_features, out_features)\n else:\n self.fc1 = GroupLinear(in_features, hidden_features, group)\n self.fc2 = GroupLinear(hidden_features, out_features, group)\n self.act = act_layer()\n\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass GroupNorm(nn.Module):\n def __init__(self, num_groups, embed_dim, eps=1e-5, affine=True):\n super().__init__()\n self.gn = nn.GroupNorm(num_groups, embed_dim, eps, affine)\n\n def forward(self, x):\n B, T, C = x.shape\n x = x.view(B * T, C)\n x = self.gn(x)\n x = x.view(B, T, C)\n return x\n\n\nclass Attention(nn.Module):\n \"\"\"\n Multi-head self-attention\n from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n with some modification to support different num_heads and head_dim.\n 
\"\"\"\n\n def __init__(\n self,\n dim,\n num_heads=8,\n head_dim=None,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ):\n super().__init__()\n self.num_heads = num_heads\n if head_dim is not None:\n self.head_dim = head_dim\n else:\n head_dim = dim // num_heads\n self.head_dim = head_dim\n self.scale = qk_scale or head_dim**-0.5\n\n self.qkv = nn.Linear(dim, self.head_dim * self.num_heads * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(self.head_dim * self.num_heads, dim)\n self.proj_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, padding_mask=None):\n B, N, C = x.shape\n qkv = (\n self.qkv(x)\n .reshape(B, N, 3, self.num_heads, self.head_dim)\n .permute(2, 0, 3, 1, 4)\n )\n # B,heads,N,C/heads\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n # trick here to make q@k.t more stable\n attn = (q * self.scale) @ k.transpose(-2, -1)\n if padding_mask is not None:\n attn = attn.view(B, self.num_heads, N, N)\n attn = attn.masked_fill(\n padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\"),\n )\n attn_float = attn.softmax(dim=-1, dtype=torch.float32)\n attn = attn_float.type_as(attn)\n else:\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, self.head_dim * self.num_heads)\n x = self.proj(x)\n x = self.proj_drop(x)\n return x\n\n\nclass Block(nn.Module):\n \"\"\"\n Pre-layernorm transformer block\n \"\"\"\n\n def __init__(\n self,\n dim,\n num_heads,\n head_dim=None,\n mlp_ratio=4.0,\n qkv_bias=False,\n qk_scale=None,\n drop=0.0,\n attn_drop=0.0,\n drop_path=0.0,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n group=1,\n skip_lam=1.0,\n ):\n super().__init__()\n self.dim = dim\n self.mlp_hidden_dim = int(dim * mlp_ratio)\n self.skip_lam = skip_lam\n\n self.norm1 = norm_layer(dim)\n self.attn = Attention(\n dim,\n num_heads=num_heads,\n head_dim=head_dim,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=drop,\n )\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n self.norm2 = norm_layer(dim)\n self.mlp = Mlp(\n in_features=dim,\n hidden_features=self.mlp_hidden_dim,\n act_layer=act_layer,\n drop=drop,\n group=group,\n )\n\n def forward(self, x, padding_mask=None):\n x = x + self.drop_path(self.attn(self.norm1(x), padding_mask)) / self.skip_lam\n x = x + self.drop_path(self.mlp(self.norm2(x))) / self.skip_lam\n return x\n\n def flops(self, s):\n heads = self.attn.num_heads\n h = self.dim\n i = self.mlp_hidden_dim\n mha_block_flops = dict(\n kqv=3 * h * h,\n attention_scores=h * s,\n attn_softmax=SOFTMAX_FLOPS * s * heads,\n attention_dropout=DROPOUT_FLOPS * s * heads,\n attention_scale=s * heads,\n attention_weighted_avg_values=h * s,\n attn_output=h * h,\n attn_output_bias=h,\n attn_output_dropout=DROPOUT_FLOPS * h,\n attn_output_residual=h,\n attn_output_layer_norm=LAYER_NORM_FLOPS * h,\n )\n ffn_block_flops = dict(\n intermediate=h * i,\n intermediate_act=ACTIVATION_FLOPS * i,\n intermediate_bias=i,\n output=h * i,\n output_bias=h,\n output_dropout=DROPOUT_FLOPS * h,\n output_residual=h,\n output_layer_norm=LAYER_NORM_FLOPS * h,\n )\n\n return sum(mha_block_flops.values()) * s + sum(ffn_block_flops.values()) * s\n\n\nclass MHABlock(nn.Module):\n \"\"\"\n Multihead Attention block with residual branch\n \"\"\"\n\n def __init__(\n self,\n dim,\n num_heads,\n head_dim=None,\n mlp_ratio=4.0,\n qkv_bias=False,\n qk_scale=None,\n drop=0.0,\n attn_drop=0.0,\n drop_path=0.0,\n act_layer=nn.GELU,\n 
norm_layer=nn.LayerNorm,\n group=1,\n skip_lam=1.0,\n ):\n super().__init__()\n self.dim = dim\n self.norm1 = norm_layer(dim)\n self.skip_lam = skip_lam\n self.attn = Attention(\n dim,\n num_heads=num_heads,\n head_dim=head_dim,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=drop,\n )\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n\n def forward(self, x, padding_mask=None):\n x = (\n x\n + self.drop_path(self.attn(self.norm1(x * self.skip_lam), padding_mask))\n / self.skip_lam\n )\n return x\n\n def flops(self, s):\n heads = self.attn.num_heads\n h = self.dim\n block_flops = dict(\n kqv=3 * h * h,\n attention_scores=h * s,\n attn_softmax=SOFTMAX_FLOPS * s * heads,\n attention_dropout=DROPOUT_FLOPS * s * heads,\n attention_scale=s * heads,\n attention_weighted_avg_values=h * s,\n attn_output=h * h,\n attn_output_bias=h,\n attn_output_dropout=DROPOUT_FLOPS * h,\n attn_output_residual=h,\n attn_output_layer_norm=LAYER_NORM_FLOPS * h,\n )\n\n return sum(block_flops.values()) * s\n\n\nclass FFNBlock(nn.Module):\n \"\"\"\n Feed forward network with residual branch\n \"\"\"\n\n def __init__(\n self,\n dim,\n num_heads,\n head_dim=None,\n mlp_ratio=4.0,\n qkv_bias=False,\n qk_scale=None,\n drop=0.0,\n attn_drop=0.0,\n drop_path=0.0,\n act_layer=nn.GELU,\n norm_layer=nn.LayerNorm,\n group=1,\n skip_lam=1.0,\n ):\n super().__init__()\n self.skip_lam = skip_lam\n self.dim = dim\n self.mlp_hidden_dim = int(dim * mlp_ratio)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n self.norm2 = norm_layer(dim)\n self.mlp = Mlp(\n in_features=dim,\n hidden_features=self.mlp_hidden_dim,\n act_layer=act_layer,\n drop=drop,\n group=group,\n )\n\n def forward(self, x):\n x = x + self.drop_path(self.mlp(self.norm2(x * self.skip_lam))) / self.skip_lam\n return x\n\n def flops(self, s):\n heads = self.attn.num_heads\n h = self.dim\n i = self.mlp_hidden_dim\n block_flops = dict(\n intermediate=h * i,\n intermediate_act=ACTIVATION_FLOPS * i,\n intermediate_bias=i,\n output=h * i,\n output_bias=h,\n output_dropout=DROPOUT_FLOPS * h,\n output_residual=h,\n output_layer_norm=LAYER_NORM_FLOPS * h,\n )\n\n return sum(block_flops.values()) * s\n\n\nclass HybridEmbed(nn.Module):\n \"\"\"CNN Feature Map Embedding\n Extract feature map from CNN, flatten, project to embedding dim.\n from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n \"\"\"\n\n def __init__(\n self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768\n ):\n super().__init__()\n assert isinstance(backbone, nn.Module)\n img_size = to_2tuple(img_size)\n self.img_size = img_size\n self.backbone = backbone\n if feature_size is None:\n with torch.no_grad():\n training = backbone.training\n if training:\n backbone.eval()\n o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[\n -1\n ]\n feature_size = o.shape[-2:]\n feature_dim = o.shape[1]\n backbone.train(training)\n else:\n feature_size = to_2tuple(feature_size)\n feature_dim = self.backbone.feature_info.channels()[-1]\n self.num_patches = feature_size[0] * feature_size[1]\n self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=1)\n\n def forward(self, x):\n x = self.backbone(x)[-1]\n x = self.proj(x)\n return x\n\n\nclass PatchEmbedNaive(nn.Module):\n \"\"\"\n Image to Patch Embedding\n from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n \"\"\"\n\n def __init__(self, img_size=224, 
patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n self.embed_dim = embed_dim\n\n self.proj = nn.Conv2d(\n in_chans, embed_dim, kernel_size=patch_size, stride=patch_size\n )\n\n def forward(self, x):\n B, C, H, W = x.shape\n assert (\n H == self.img_size[0] and W == self.img_size[1]\n ), f\"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).\"\n x = self.proj(x)\n return x\n\n def flops(self):\n img_size = self.img_size[0]\n block_flops = dict(\n proj=img_size * img_size * 3 * self.embed_dim,\n )\n return sum(block_flops.values())\n\n\nclass PatchEmbed4_2(nn.Module):\n \"\"\"\n Image to Patch Embedding with 4 layer convolution\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n\n new_patch_size = to_2tuple(patch_size // 2)\n\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n self.embed_dim = embed_dim\n\n self.conv1 = nn.Conv2d(\n in_chans, 64, kernel_size=7, stride=2, padding=3, bias=False\n ) # 112x112\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n 64, 64, kernel_size=3, stride=1, padding=1, bias=False\n ) # 112x112\n self.bn2 = nn.BatchNorm2d(64)\n self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(64)\n\n self.proj = nn.Conv2d(\n 64, embed_dim, kernel_size=new_patch_size, stride=new_patch_size\n )\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n\n x = self.proj(x) # [B, C, W, H]\n\n return x\n\n def flops(self):\n img_size = self.img_size[0]\n block_flops = dict(\n conv1=img_size / 2 * img_size / 2 * 3 * 64 * 7 * 7,\n conv2=img_size / 2 * img_size / 2 * 64 * 64 * 3 * 3,\n conv3=img_size / 2 * img_size / 2 * 64 * 64 * 3 * 3,\n proj=img_size / 2 * img_size / 2 * 64 * self.embed_dim,\n )\n return sum(block_flops.values())\n\n\nclass PatchEmbed4_2_128(nn.Module):\n \"\"\"\n Image to Patch Embedding with 4 layer convolution and 128 filters\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):\n super().__init__()\n\n new_patch_size = to_2tuple(patch_size // 2)\n\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches\n self.embed_dim = embed_dim\n\n self.conv1 = nn.Conv2d(\n in_chans, 128, kernel_size=7, stride=2, padding=3, bias=False\n ) # 112x112\n self.bn1 = nn.BatchNorm2d(128)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(\n 128, 128, kernel_size=3, stride=1, padding=1, bias=False\n ) # 112x112\n self.bn2 = nn.BatchNorm2d(128)\n self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(128)\n\n self.proj = nn.Conv2d(\n 128, embed_dim, kernel_size=new_patch_size, stride=new_patch_size\n )\n\n def forward(self, x):\n x 
= self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.relu(x)\n\n x = self.proj(x) # [B, C, W, H]\n\n return x\n\n def flops(self):\n img_size = self.img_size[0]\n block_flops = dict(\n conv1=img_size / 2 * img_size / 2 * 3 * 128 * 7 * 7,\n conv2=img_size / 2 * img_size / 2 * 128 * 128 * 3 * 3,\n conv3=img_size / 2 * img_size / 2 * 128 * 128 * 3 * 3,\n proj=img_size / 2 * img_size / 2 * 128 * self.embed_dim,\n )\n return sum(block_flops.values())\n\n\nimport torch\nimport torch.nn as nn\n\nfrom timm.models.helpers import load_pretrained\nfrom timm.models.registry import register_model\nfrom timm.models.layers import trunc_normal_\nfrom timm.models.resnet import resnet26d, resnet50d, resnet101d\nimport numpy as np\n\n# from .layers import *\n\n\ndef _cfg(url=\"\", **kwargs):\n return {\n \"url\": url,\n \"num_classes\": 1000,\n \"input_size\": (3, 224, 224),\n \"pool_size\": None,\n \"crop_pct\": 0.9,\n \"interpolation\": \"bicubic\",\n \"mean\": (0.485, 0.456, 0.406),\n \"std\": (0.229, 0.224, 0.225),\n \"classifier\": \"head\",\n **kwargs,\n }\n\n\ndefault_cfgs = {\n \"LV_ViT_Tiny\": _cfg(),\n \"LV_ViT\": _cfg(),\n \"LV_ViT_Medium\": _cfg(crop_pct=1.0),\n \"LV_ViT_Large\": _cfg(crop_pct=1.0),\n}\n\n\ndef get_block(block_type, **kargs):\n if block_type == \"mha\":\n # multi-head attention block\n return MHABlock(**kargs)\n elif block_type == \"ffn\":\n # feed forward block\n return FFNBlock(**kargs)\n elif block_type == \"tr\":\n # transformer block\n return Block(**kargs)\n\n\ndef rand_bbox(size, lam):\n W = size[2]\n H = size[3]\n cut_rat = np.sqrt(1.0 - lam)\n cut_w = np.int(W * cut_rat)\n cut_h = np.int(H * cut_rat)\n\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n return bbx1, bby1, bbx2, bby2\n\n\ndef get_dpr(drop_path_rate, depth, drop_path_decay=\"linear\"):\n if drop_path_decay == \"linear\":\n # linear dpr decay\n dpr = [\n x.item() for x in torch.linspace(0, drop_path_rate, depth)\n ] # stochastic depth decay rule\n elif drop_path_decay == \"fix\":\n # use fixed dpr\n dpr = [drop_path_rate] * depth\n else:\n # use predefined drop_path_rate list\n assert len(drop_path_rate) == depth\n dpr = drop_path_rate\n return dpr\n\n\nclass LV_ViT(nn.Module):\n \"\"\"Vision Transformer with tricks\n Arguements:\n p_emb: different conv based position embedding (default: 4 layer conv)\n skip_lam: residual scalar for skip connection (default: 1.0)\n order: which order of layers will be used (default: None, will override depth if given)\n mix_token: use mix token augmentation for batch of tokens (default: False)\n return_dense: whether to return feature of all tokens with an additional aux_head (default: False)\n \"\"\"\n\n def __init__(\n self,\n img_size=224,\n patch_size=16,\n in_chans=3,\n num_classes=1000,\n embed_dim=768,\n depth=12,\n num_heads=12,\n mlp_ratio=4.0,\n qkv_bias=False,\n qk_scale=None,\n drop_rate=0.0,\n attn_drop_rate=0.0,\n drop_path_rate=0.0,\n drop_path_decay=\"linear\",\n hybrid_backbone=None,\n norm_layer=nn.LayerNorm,\n p_emb=\"4_2\",\n head_dim=None,\n skip_lam=1.0,\n order=None,\n mix_token=False,\n return_dense=False,\n ):\n super().__init__()\n self.num_classes = num_classes\n self.num_features = (\n self.embed_dim\n ) = embed_dim # num_features for consistency with other 
models\n self.output_dim = embed_dim if num_classes == 0 else num_classes\n if hybrid_backbone is not None:\n self.patch_embed = HybridEmbed(\n hybrid_backbone,\n img_size=img_size,\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n else:\n if p_emb == \"4_2\":\n patch_embed_fn = PatchEmbed4_2\n elif p_emb == \"4_2_128\":\n patch_embed_fn = PatchEmbed4_2_128\n else:\n patch_embed_fn = PatchEmbedNaive\n\n self.patch_embed = patch_embed_fn(\n img_size=img_size,\n patch_size=patch_size,\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n num_patches = self.patch_embed.num_patches\n\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n if order is None:\n dpr = get_dpr(drop_path_rate, depth, drop_path_decay)\n self.blocks = nn.ModuleList(\n [\n Block(\n dim=embed_dim,\n num_heads=num_heads,\n head_dim=head_dim,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=dpr[i],\n norm_layer=norm_layer,\n skip_lam=skip_lam,\n )\n for i in range(depth)\n ]\n )\n else:\n # use given order to sequentially generate modules\n dpr = get_dpr(drop_path_rate, len(order), drop_path_decay)\n self.blocks = nn.ModuleList(\n [\n get_block(\n order[i],\n dim=embed_dim,\n num_heads=num_heads,\n head_dim=head_dim,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=dpr[i],\n norm_layer=norm_layer,\n skip_lam=skip_lam,\n )\n for i in range(len(order))\n ]\n )\n\n self.norm = norm_layer(embed_dim)\n self.head = (\n nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n )\n\n self.return_dense = return_dense\n self.mix_token = mix_token\n\n if return_dense:\n self.aux_head = (\n nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n )\n if mix_token:\n self.beta = 1.0\n assert return_dense, \"always return all features when mixtoken is enabled\"\n\n trunc_normal_(self.pos_embed, std=0.02)\n trunc_normal_(self.cls_token, std=0.02)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=0.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, GroupLinear):\n trunc_normal_(m.group_weight, std=0.02)\n if isinstance(m, GroupLinear) and m.group_bias is not None:\n nn.init.constant_(m.group_bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {\"pos_embed\", \"cls_token\"}\n\n def get_classifier(self):\n return self.head\n\n def reset_classifier(self, num_classes, global_pool=\"\"):\n self.num_classes = num_classes\n self.head = (\n nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n )\n\n def forward_embeddings(self, x):\n x = self.patch_embed(x)\n return x\n\n def forward_tokens(self, x):\n B = x.shape[0]\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n x = x + self.pos_embed\n x = self.pos_drop(x)\n for blk in self.blocks:\n x = blk(x)\n x = self.norm(x)\n return x\n\n def forward_features(self, x):\n # simple forward to obtain feature map (without mixtoken)\n x = self.forward_embeddings(x)\n x = x.flatten(2).transpose(1, 2)\n x = self.forward_tokens(x)\n return x\n\n def forward(self, x):\n x = 
self.forward_embeddings(x)\n\n # token level mixtoken augmentation\n if self.mix_token and self.training:\n lam = np.random.beta(self.beta, self.beta)\n patch_h, patch_w = x.shape[2], x.shape[3]\n bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam)\n temp_x = x.clone()\n temp_x[:, :, bbx1:bbx2, bby1:bby2] = x.flip(0)[:, :, bbx1:bbx2, bby1:bby2]\n x = temp_x\n else:\n bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0\n\n x = x.flatten(2).transpose(1, 2)\n x = self.forward_tokens(x)\n x_cls = self.head(x[:, 0])\n\n if self.return_dense:\n x_aux = self.aux_head(x[:, 1:])\n if not self.training:\n return x_cls + 0.5 * x_aux.max(1)[0]\n\n # recover the mixed part\n if self.mix_token and self.training:\n x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1])\n temp_x = x_aux.clone()\n temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[\n :, bbx1:bbx2, bby1:bby2, :\n ]\n x_aux = temp_x\n x_aux = x_aux.reshape(\n x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1]\n )\n\n return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)\n return x_cls\n\n\n@register_model\ndef vit(pretrained=False, **kwargs):\n model = LV_ViT(\n patch_size=16,\n embed_dim=384,\n depth=16,\n num_heads=6,\n mlp_ratio=3.0,\n p_emb=1,\n **kwargs,\n )\n model.default_cfg = default_cfgs[\"LV_ViT\"]\n return model\n\n\n@register_model\ndef lvvit(pretrained=False, **kwargs):\n model = LV_ViT(\n patch_size=16,\n embed_dim=384,\n depth=16,\n num_heads=6,\n mlp_ratio=3.0,\n p_emb=\"4_2\",\n skip_lam=2.0,\n **kwargs,\n )\n model.default_cfg = default_cfgs[\"LV_ViT\"]\n return model\n\n\n@register_model\ndef lvvit_s(pretrained=False, **kwargs):\n model = LV_ViT(\n patch_size=16,\n embed_dim=384,\n depth=16,\n num_heads=6,\n mlp_ratio=3.0,\n p_emb=\"4_2\",\n skip_lam=2.0,\n return_dense=True,\n mix_token=True,\n **kwargs,\n )\n model.default_cfg = default_cfgs[\"LV_ViT\"]\n return model\n\n\n@register_model\ndef lvvit_m(pretrained=False, **kwargs):\n model = LV_ViT(\n patch_size=16,\n embed_dim=512,\n depth=20,\n num_heads=8,\n mlp_ratio=3.0,\n p_emb=\"4_2\",\n skip_lam=2.0,\n return_dense=True,\n mix_token=True,\n **kwargs,\n )\n model.default_cfg = default_cfgs[\"LV_ViT_Medium\"]\n return model\n\n\n@register_model\ndef lvvit_l(pretrained=False, **kwargs):\n order = [\"tr\"] * 24 # this will override depth, can also be set as None\n model = LV_ViT(\n patch_size=16,\n embed_dim=768,\n depth=24,\n num_heads=12,\n mlp_ratio=3.0,\n p_emb=\"4_2_128\",\n skip_lam=3.0,\n return_dense=True,\n mix_token=True,\n order=order,\n **kwargs,\n )\n model.default_cfg = default_cfgs[\"LV_ViT_Large\"]\n return model\n","sub_path":"code/arch/lvvit_arch.py","file_name":"lvvit_arch.py","file_ext":"py","file_size_in_byte":28415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114117210","text":"import sys\nimport pprint\nimport copy\nsys.stdin = open('게리맨더링.txt','r')\n\ndef DFS(s):\n global C\n for v in G[s]:\n if v not in C:\n C.append(v)\n DFS(v)\n\ndef comb(k,start,choice,n):\n global R\n if k == n:\n if len(choice) != 0 and len(choice) != N+1:\n _choice = copy.deepcopy(choice)\n R.append(_choice)\n return\n for i in range(start,N+1):\n choice.append(i)\n comb(k+1,i+1,choice,n)\n choice.pop()\n\nN = int(input())\nboard = [list(map(int,input().split())) for _ in range(N+1)]\nG = [[] for _ in range(N+1)]\n\nfor i in range(len(board)):\n for j in board[i]:\n G[i].append(j-1)\n G[j-1].append(i)\nR = []\nfor i in range(1,(N+1)//2+1):\n comb(0,0,[],i)\ncon = set([i for i in range(N+1)])\npoint = []\nfor r in R:\n B 
= set(r)\n A = con - B\n point.append((A,B))\n\nresult = []\nMin = 99999999\ncount = 0\nfor i in range(len(point)):\n flag = 0\n for j in range(2):\n Ar = list(point[i][j]) \n C = [Ar[0]] \n DFS(Ar[0])\n if len(set(Ar) - set(C))==0:\n flag += 1\n if flag == 2:\n count += 1\nprint(count)\n# Min = min(Min,abs(sum(point[i][0]) - sum(point[i][1])))\n# print(Min)","sub_path":"10월/1002/게리맨더링.py","file_name":"게리맨더링.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"566849147","text":"\"\"\"Contain All Models for Sudoku.\nIn descending order\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# from torchsummary import *\n\nclass CNN_SS(nn.Module):\n \"\"\"Use semantic segmentation techniques to get a probability output, which indicate\n the relenace of values (1 to 9) for each position.\n \"\"\"\n\n def __init__(self, in_channels=1, out_channels=1):\n\n # Basics\n super(CNN_SS, self).__init__()\n self.version = 'MT3'\n # Initializing all layers\n self.conv1 = nn.Conv2d(in_channels, 20, 3)\n self.conv2 = nn.Conv2d(20, 50, 3)\n self.conv3 = nn.Conv2d(50, 100, 3)\n\n self.deconv1 = nn.ConvTranspose2d(100, 50, 3)\n self.deconv2 = nn.ConvTranspose2d(50, 20, 3)\n self.deconv3 = nn.ConvTranspose2d(20, out_channels, 3)\n\n def forward(self, input):\n x = F.relu(self.conv1(input))\n # print('conv1:', x.shape)\n\n x = F.relu(self.conv2(x))\n # print('conv2:', x.shape)\n\n x = F.relu(self.conv3(x))\n # print('conv3:', x.shape)\n\n x = F.relu(self.deconv1(x))\n # print('conv3:', x.shape)\n\n x = F.relu(self.deconv2(x))\n # print('conv3:', x.shape)\n\n x = F.softmax(self.deconv3(x), dim=1)\n # print('conv3:', x.shape)\n # raise NotImplementedError\n return x\n\nclass sudokuCNN(nn.Module):\n \"\"\"Model Type 2 (MT2)\n\n CNN\n \"\"\"\n\n def __init__(self, in_channels=1, out_channels=1):\n\n # Basics\n super(sudokuCNN, self).__init__()\n self.version = 'MT2'\n # Initializing all layers\n self.conv1 = nn.Conv2d(in_channels, 20, 3)\n self.conv2 = nn.Conv2d(20, 50, 3)\n self.conv3 = nn.Conv2d(50, 100, 3)\n\n self.deconv1 = nn.ConvTranspose2d(100, 50, 3)\n self.deconv2 = nn.ConvTranspose2d(50, 20, 3)\n self.deconv3 = nn.ConvTranspose2d(20, out_channels, 3)\n\n def forward(self, input):\n x = F.relu(self.conv1(input))\n # print('conv1:', x.shape)\n\n x = F.relu(self.conv2(x))\n # print('conv2:', x.shape)\n\n x = F.relu(self.conv3(x))\n # print('conv3:', x.shape)\n\n x = F.relu(self.deconv1(x))\n # print('conv3:', x.shape)\n\n x = F.relu(self.deconv2(x))\n # print('conv3:', x.shape)\n\n x = self.deconv3(x)\n # print('conv3:', x.shape)\n # raise NotImplementedError\n return x\n\n\nclass sudokuModel(nn.Module):\n \"\"\"Model Type 1 (MT1)\n\n Linear Model\n \"\"\"\n\n def __init__(self, in_channels=1, out_channels=10):\n\n # Basics\n super(sudokuModel, self).__init__()\n self.version = 'MT1'\n # Initializing all layers\n self.fc1 = nn.Linear(in_channels, 100)\n self.fc2 = nn.Linear(100, 200)\n self.fc3 = nn.Linear(200, 100)\n self.fc4 = nn.Linear(100, out_channels)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)\n\n\nif __name__ == \"__main__\":\n\n model = sudokuCNN()\n print(model.__doc__)\n","sub_path":"Sudoku/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"70920102","text":"from 
random import *\nfrom rit_object import *\n\n##from baseGen import * ### ==> diceRolling()\nfrom skills import *\nfrom talents import *\n\n\"\"\"\n0 = feral +T +S -Inf\n1 = forge +Int +T -Fel\n2 = highborn +Fel +Inf -T\n3 = hive +Per +Ag -WP\n4 = shrine +WP +Fel -Per\n5 = void +Int +WP -S\n\n0 = administratum \n1 = arbites\n2 = telepathica\n3 = admech\n4 = ministorum\n5 = guard\n6 = outcast\n\n0 = assassin\n1 = chirurgeon\n2 = desperado\n3 = hierophant\n4 = mystic\n5 = sage\n6 = seeker\n7 = warrior\n\"\"\"\n\nclass Character( rit_object ):\n __slots__ = ( 'name', 'stats', 'apt', 'backg', 'talents', 'skills', 'origRolls', 'xp' )\n _types = ( str, object, object, object, object, dict, object, object )\n\nclass TotalXP( rit_object ):\n __slots__ = ( 'earned', 'skills', 'talents', 'attrs', 'adv', 'spent' )\n _types = ( int, int, int, int, int, int )\n### xp earned; spent on skills; spent on talents; spent on talents;\n### spent on attributes; spent on elite advances; total spent\n\nclass Background( rit_object ):\n __slots__ = ( 'homeworld', 'background', 'role' )\n _types = ( list, list, list )\n\nclass Aptitudes( rit_object ):\n __slots__ = ( 'charApt', 'attrApt' )\n _types = ( list, list )\n\nclass DiceRolls( rit_object ):\n __slots__ = ( 'WS', 'BS', 'S', 'T', 'Ag', 'Int', 'Per', 'WP', 'Fel', 'Inf', 'Wounds', 'Fate' )\n _types = ( list, list, list, list, list, list, list, list, list, list, int, int )\n\nclass Characteristics( rit_object ):\n __slots__ = ( 'WS', 'BS', 'S', 'T', 'Ag', 'Int', 'Per', 'WP', 'Fel', 'Inf', 'Wounds', 'Fate' )\n _types = ( int, int, int, int, int, int, int, int, int, int, int, int )\n\n\n## [ name, aptitude, +, -, wounds, fate ]\nHOMEWORLD_LIST = [[ 'feral', 3, 4, 9, 9, [ 2, 3 ] ],\n [ 'forge', 5, 3, 8, 8, [ 3, 8 ] ],\n [ 'highborn', 8, 9, 3, 9, [ 4, 10] ],\n [ 'hive', 6, 4, 7, 8, [ 2, 6 ] ],\n [ 'shrine', 7, 8, 6, 7, [ 3, 6 ] ],\n [ 'void', 5, 7, 2, 7, [ 3, 5 ] ]]\n\n\n## [ name, aptitudes ]\nBACKGROUND_LIST =[[ 'administratum', [ 6, 9 ] ],\n [ 'arbites', [ 1, 2 ] ],\n [ 'telepathica', [ 2, 7 ] ],\n [ 'admech', [ 6, 5 ] ],\n [ 'ministorum', [ 8, 9 ] ],\n [ 'guard', [ 4, 8 ] ],\n [ 'outcast', [ 4, 9 ] ]]\n\n\n## [ name, attrApts, stdAppts\nROLE_LIST = [[ 'assassin', [ [ 0, 1 ], 4, 6 ], [ 3, 4 ] ],\n [ 'chirurgeon', [ 2, 3, 5 ], [ 4, 6 ] ],\n [ 'desperado', [ 1, 4, 8 ], [ 2, 3 ] ], \n [ 'hierophant', [ 3, 7, 8 ], [ 1, 9 ] ],\n [ 'mystic', [ 5, 6, 7 ], [ 2, 6 ] ],\n [ 'sage', [ 5, 6, 7 ], [ 5, 6 ] ], \n [ 'seeker', [ 5, 6, 8 ], [ 5, 9 ] ],\n [ 'warrior', [ 0, 1, 2 ], [ 1, 2 ] ]]\n\nHOMEWORLDS = {}\n\nfor hw in HOMEWORLD_LIST:\n HOMEWORLDS[ hw[ 0 ] ] = hw[ 1: ]\n\nBACKGROUNDS = {}\n\nfor bg in BACKGROUND_LIST:\n BACKGROUNDS[ bg[ 0 ] ] = bg[ 1 ]\n\nROLES = {}\n\nfor ro in ROLE_LIST:\n ROLES[ ro[ 0 ] ] = ro[ 1: ]\n\n\n## 0, 1, 2, 3, 4, 5, 6, 7, 8, 9\nSTD_APTITUDE_LIST = [ 'general', 'offense', 'defense', 'finesse', 'fieldcraft', 'tech', 'knowledge', 'psyker', 'leadership', 'social' ]\n## 0, 1, 2, 3, 4, 5, 6, 7, 8\nATTR_APTITUDE_LIST = [ 'WS', 'BS', 'S', 'T', 'Ag', 'Int', 'Per', 'WP', 'Fel' ]\n\n\nNAME = 'Kark' #input( \"Enter name: \" )\nHOMEWORLD = 'feral' #input( \"Enter homeworld: \" )\nBACKGROUND = 'arbites' #input( \"Enter background: \" )\nROLE = 'assassin' #input( \"Enter role: \" )\n\n\ndef newCharacter():\n \"\"\"\n \n\n returns a Character object\n \"\"\"\n backg = popBackground()\n rolls = diceRolling()\n chars = baseChars( rolls, backg )\n apts = popAptitudes()\n sks = initializeSkills()\n return Character( NAME, chars, apts, backg, None, sks, rolls, TotalXP( 
0, 0, 0, 0, 0, 0 ) )\n\ndef popBackground():\n \"\"\"\n \n\n returns a Background object\n \"\"\"\n\n homeworld = HOMEWORLD.lower() ###HOMEWORLD is a str inputted by the user, assumed to be in HOMEWORLD_LIST\n hwLst = HOMEWORLDS[ homeworld ] ###list of ints and a list\n\n background = BACKGROUND.lower() ###BACKGROUND is a str inputted by the user, assumed to be in BACKGROUND_LIST\n bgLst = BACKGROUNDS[ background ] ###list of two ints\n\n role = ROLE.lower() ###ROLE is a str inputted by the user, assumed to be in ROLE_LIST\n roLst = ROLES[ role ] ###list containing two lists of ints\n \n backInd = Background( hwLst, bgLst, roLst )\n\n return backInd\n\ndef popAptitudes():\n \"\"\"\n generates an Aptitudes object with lists of indeces corresponding to relevant aptitudes\n \n refers to two lists, STD_APTITUDE_LIST and ATTR_APTITUDE_LIST, which are lists of strings\n the indeces of both correspond with the relevant integers in HOMEWORLDS, BACKGROUNDS, and ROLES\n\n returns Aptitudes object\n \"\"\"\n\n attrList = ATTR_APTITUDE_LIST\n stdList = STD_APTITUDE_LIST\n\n\n ###HOMEWORLDS, BACKGROUNDS, and ROLES are all dictionary objects\n homeworld = HOMEWORLD.lower() ###HOMEWORLD is a str inputted by the user, assumed to be in HOMEWORLD_LIST\n hwInd = HOMEWORLDS[ homeworld ][ 0 ] ###int\n\n background = BACKGROUND.lower() ###\n bgInd = BACKGROUNDS[ background ] ###list of two ints\n\n role = ROLE.lower() ###ROLE is a str inputted by the user, assumed to be in ROLE_LIST\n roLst = ROLES[ role ] ###list containing two lists of ints\n\n attrApts = [ attrList[ hwInd ] ] ###attrApts is a list of strings\n stdApts = [ stdList[ i ] for i in bgInd ] ###stdApts is a list of strings\n\n for d in range( 0, 3 ):\n if type( roLst[ 0 ][ d ] ) == list: ###Assassin role is only role to have an\n ###option between WS or BS as aptitudes,\n ###implementation will be reworked into something more elegant\n j = input( \"0 or 1\" )\n attrApts.append( attrList[ roLst[ 0 ][ d ][ int( j ) ] ] )\n else:\n attrApts.append( attrList[ roLst[ 0 ][ d ] ] )\n\n for f in range( 0, 2 ):\n stdApts.append( stdList[ roLst[ 1 ][ f ] ] )\n\n apts = Aptitudes( attrApts, stdApts )\n\n return apts\n\ndef diceRolling():\n \"\"\"\n rolls dice corresponding to each attribute. for attributes with three rolls instead\n of two rolls, it is implied that the third roll doesn't exist until it is needed\n (i.e. 
player rolls 2d10, then the extra d10 is there for later reference if needed )\n\n returns a DiceRolls object\n \"\"\"\n\n WS = [ randint( 1, 10 ), randint( 1, 10 ) ]\n BS = [ randint( 1, 10 ), randint( 1, 10 ) ]\n S = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n T = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n Ag = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n Int = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n Per = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n WP = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n Fel = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n Inf = [ randint( 1, 10 ), randint( 1, 10 ), randint( 1, 10 ) ]\n woundsRoll = randint( 1, 5 )\n fateRoll = randint( 1, 10 )\n\n rolls = DiceRolls( WS, BS, S, T, Ag, Int, Per, WP, Fel, Inf, woundsRoll, fateRoll )\n\n return rolls\n\ndef baseChars( rolls, background ):\n \"\"\"\n takes the dicerolls and the chosen background options, and derives\n the final starting characteristics (attrs, wounds, fate)\n\n rolls -- a DiceRolls object, with each attr being a list of 2 or 3 ints\n between 1 and 10, and wounds and fate being a single int\n background -- a Background object, which contains lists containing the relevant\n indices for attributes, one list for each of homeworld, background,\n and role, each of which may contain more lists\n\n returns a Characteristics object, which contains the final beginning characteristics\n for the created character, all attrs and max wounds and fate points\n \"\"\"\n\n ###List of lists\n attrs = [ rolls.WS, rolls.BS, rolls.S, rolls.T, rolls.Ag,\n rolls.Int, rolls.Per, rolls.WP, rolls.Fel, rolls.Inf ]\n\n home = background.homeworld ###list of ints and a list\n\n ### len( baseAttrs ) == len( attrs )\n baseAttrs = [ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20 ]\n\n ###Sorting the first \"+\" attr \n attrs[ home[ 0 ] ] = sorted( attrs[ home[ 0 ] ] )\n attrs[ home[ 0 ] ] = attrs[ home[ 0 ] ][ 1: ] ###Cuts off the lowest roll\n\n ###Sorting the second \"+\" attr \n attrs[ home[ 1 ] ] = sorted( attrs[ home[ 1 ] ] )\n attrs[ home[ 1 ] ] = attrs[ home[ 1 ] ][ 1: ] ###Cuts off the lowest roll\n\n ###Sorting the \"-\" attr \n attrs[ home[ 2 ] ] = sorted( attrs[ home[ 2 ] ] )\n attrs[ home[ 2 ] ] = attrs[ home[ 2 ] ][ 0:2 ] ###Cuts off the highest roll\n\n ###adds the wounds roll to the base starting wounds for a given homeworld\n wounds = rolls.Wounds + home[ 3 ]\n\n ###Adds the two dice rolls for each attr with the base attr (20) into a single int between 22 and 40\n for e in range( 0, 10 ):\n for d in range( 0, 2 ):\n baseAttrs[ e ] = baseAttrs[ e ] + attrs[ e ][ d ]\n\n ###home[ 4 ] is a list of two ints, the first is the base FP for that homeworld, the second is\n ###the number that must be rolled >= with rolls.Fate to increase the base by 1\n if rolls.Fate >= home[ 4 ][ 1 ]:\n fate = home[ 4 ][ 0 ] + 1\n else:\n fate = home[ 4 ][ 0 ]\n \n return Characteristics( baseAttrs[ 0 ], baseAttrs[ 1 ], baseAttrs[ 2 ], baseAttrs[ 3 ],\n baseAttrs[ 4 ], baseAttrs[ 5 ], baseAttrs[ 6 ], baseAttrs[ 7 ],\n baseAttrs[ 8 ], baseAttrs[ 9 ], wounds, fate )\n\nprint( newCharacter() )\n","sub_path":"src/genCore.py","file_name":"genCore.py","file_ext":"py","file_size_in_byte":10556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"57683207","text":"import json\nimport random\nimport time\nfrom urllib import request, parse\n\nfrom selenium import webdriver\n\n\nclass QQmessage:\n def 
__init__(self):\n self.login_url = 'http://web2.qq.com/'\n self.send_msg_url = 'http://d1.web2.qq.com/channel/send_buddy_msg2'\n self.browser = webdriver.Chrome(executable_path='/Users/clymer/Tools/chromedriver')\n\n def __get_data(self):\n self.browser.get(self.login_url)\n input('please enter something to continue')\n friend_list = []\n friends = self.browser.find_elements_by_css_selector(\".list_group_body .list_item\")\n\n for friend in friends:\n _uin = friend.find_element_by_css_selector(\".avatar\")\n friend_list.append(_uin.get_attribute(\"_uin\"))\n\n # 拼接cookie 格式为name+等号+value+分号\n cookies = \"\"\n for cookie in self.browser.get_cookies()[::-1]:\n cookies = cookies + cookie[\"name\"] + \"=\" + cookie[\"value\"] + \";\"\n\n return friend_list, cookies\n\n def __headers(self, cookies):\n headers = {\n \"Accept\": \"*/*\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Host\": \"d1.web2.qq.com\",\n \"Cookie\": cookies,\n \"Origin\": \"http://d1.web2.qq.com\",\n \"Referer\": \"http://d1.web2.qq.com/cfproxy.html?v=20151105001&callback=1\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36\"\n }\n return headers\n\n def __post_data(self, content, _uin, headers):\n form_data = {\n \"r\": r'{\"to\":' + _uin + r',\"content\":\"[\\\"' + content + r' \\\",[\\\"font\\\",{\\\"name\\\":\\\"宋体\\\",\\\"size\\\":10,\\\"style\\\":[0,0,0],\\\"color\\\":\\\"000000\\\"}]]\",\"face\":597,\"clientid\":53999199,\"msg_id\":19090001,\"psessionid\":\"8368046764001d636f6e6e7365727665725f77656271714031302e3133332e34312e383400001ad00000066b026e040015808a206d0000000a406172314338344a69526d0000002859185d94e66218548d1ecb1a12513c86126b3afb97a3c2955b1070324790733ddb059ab166de6857\"}'\n }\n try:\n data = parse.urlencode(form_data).encode('utf-8')\n req = request.Request(url=self.send_msg_url, data=data, headers=headers)\n response = request.urlopen(req).read().decode('utf-8')\n return json.loads(response)\n except Exception:\n print('发送消息失败!')\n\n def send(self):\n content = input(\"请输入要发送的内容:\")\n friend_list, cookies = self.__get_data()\n headers = self.__headers(cookies)\n self.browser.quit()\n\n for _uin in friend_list:\n result = self.__post_data(content, _uin, headers)\n print(result)\n time.sleep(random.randint(5, 7))\n\n\nif __name__ == '__main__':\n msg = QQmessage()\n msg.send()\n","sub_path":"qqmessage/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391165242","text":"# future for running the code on python 2 and import the code from python2 to python3\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\ndef cnn_model_fn(features, labels, mode):\n ## input layer\n # input tensors should have a shape of [batch_size, image_height, image_width, channels] by default for the other layers to use them\n # tf.reshape(tensor, shape, name=None)\n # If one component of shape is the special value -1, the size of that dimension is computed so that the total size remains constant. In particular, a shape of [-1] flattens into 1-D. 
At most one component of shape can be -1.\n    # note: TensorFlow scope/op names may only contain letters, digits and _.-/, so layer names use underscores instead of spaces\n    input_layer = tf.reshape(features['x'], [-1,28,28,1], name=\"input_layer\")\n\n\n    ## Convolutional Layer #1\n    # 32 5x5 filters applied with ReLU activation function\n    conv1 = tf.layers.conv2d( # output tensor - [batch_size, 28, 28, 32]\n        inputs = input_layer, #tensor input\n        filters = 32, #no of filters\n        kernel_size = [5, 5], #h and w of the 2D conv window\n        padding = \"same\", #zero padding added to preserve h and w of 28 - output tensor should have the same h and w as the input tensor\n        activation = tf.nn.relu, #activation func - max(features, 0)\n        name=\"convolution_layer_1\"\n    )\n\n    ## Pooling Layer #1\n    # input tensor should have the shape [batch_size, image_height, image_width, channels], which here is [batch_size, 28, 28, 32]\n    # pool_size - size of the max pooling filter\n    # strides - no of pixels between each receptive field\n    # OUTPUT tensor - [batch_size, 14, 14, 32]\n    pool1 = tf.layers.max_pooling2d(inputs = conv1, pool_size = [2, 2], strides = 2, name = \"pooling_layer_1\")\n\n    ## Convolutional Layer #2\n    conv2 = tf.layers.conv2d( #output tensor - [batch_size, 14, 14, 64]\n        inputs = pool1,\n        filters = 64,\n        kernel_size = [5, 5],\n        padding = \"same\",\n        activation = tf.nn.relu,\n        name=\"convolution_layer_2\"\n    )\n\n    ## Pooling Layer #2\n    # output tensor - [batch_size, 7, 7, 64]\n    pool2 = tf.layers.max_pooling2d(inputs = conv2, pool_size = [2, 2], strides = 2, name = 'pooling_layer_2')\n\n    ## Dense Layer\n    #flatten input to [batch_size, features] i.e. [batch_size, 3136]\n    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64], name=\"pool2_flat\")\n    #units - number of neurons in the dense layer (1024)\n    dense = tf.layers.dense(inputs = pool2_flat, units = 1024, activation = tf.nn.relu, name = \"dense_layer_1\")\n    # dense - [batch_size, 1024]\n    dropout = tf.layers.dropout(inputs = dense, rate = 0.4, training = mode == tf.estimator.ModeKeys.TRAIN, name = \"dropout\")\n    # dropout - [batch_size, 1024]\n\n    ## Logits Layer\n    # return the raw values for our predictions. We create a dense layer with 10 neurons (one for each target class 0–9), with linear activation (the default)\n    # logits - [batch_size, 10]\n    logits = tf.layers.dense(inputs=dropout, units=10)\n\n    predictions = {\n        # Generate predictions (for PREDICT and EVAL mode)\n        \"classes\": tf.argmax(input = logits, axis = 1),\n        # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode = mode, predictions = predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels = labels, logits = logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.001)\n train_op = optimizer.minimize(\n loss = loss,\n global_step = tf.train.get_global_step()\n )\n return tf.estimator.EstimatorSpec(mode = mode, loss = loss, train_op = train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels = labels, predictions = predictions[\"classes\"])\n }\n\n return tf.estimator.EstimatorSpec(mode = mode, loss = loss, eval_metric_ops = eval_metric_ops)\n\ndef main(unused_argv):\n # Load training and eval data\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n train_data = mnist.train.images # Returns np.array\n train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n eval_data = mnist.test.images # Returns np.array\n eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n\n # Create the Estimator\n mnist_classifier = tf.estimator.Estimator( #wraps the model given by model_fn( gives inputs and other paramters ) and the function returns necesaary operations necesaary for training, predictions and evaluations\n model_fn = cnn_model_fn,\n model_dir = \"/yup/machine learning/oriley_hands_on_ml/MNIST/mnist_logs\" #all outputs are written to this dir\n )\n\n # Set up logging for predictions\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors = tensors_to_log, #dict that maps the tenosr to be printed\n every_n_iter = 50 #prints the values every 50 iterations\n )\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn( #Returns input function that would feed dict of numpy arrays into the model.\n x = {\"x\": train_data},\n y = train_labels,\n batch_size = 100,\n num_epochs = None,\n shuffle = True\n )\n mnist_classifier.train(\n input_fn = train_input_fn,\n steps = 20000,\n hooks = [logging_hook]\n )\n\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x = {\"x\": eval_data},\n y = eval_labels,\n num_epochs = 1,\n shuffle = False\n )\n eval_results = mnist_classifier.evaluate(input_fn = eval_input_fn)\n # print (eval_results)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"337997102","text":"#imports necessary functions and variables from other files\r\nimport media\r\nimport fresh_tomatoes\r\n\r\n#create a new instance of the class Movie\r\ntoy_story = media.Movie(\"Toy Story\",\r\n \"A boy's toys come to life!\",\r\n \"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\r\n \"https://www.youtube.com/watch?v=vwyZH85NQC4\")\r\n\r\n#create a new instance of the class Movie\r\navatar = media.Movie(\"Avatar\",\r\n \"Planet with blue aliens get invaded by humans!\",\r\n \"http://upload.wikimedia.org/wikipedia/en/thumb/b/b0/Avatar-Teaser-Poster.jpg/220px-Avatar-Teaser-Poster.jpg\",\r\n 
\"https://www.youtube.com/watch?v=cRdxXPV9GNQ\")\r\n\r\n#create a new instance of the class Movie\r\nkung_pow = media.Movie(\"Kung Pow! Enter the Fist\",\r\n \"Chosen One kills Betty\",\r\n \"http://upload.wikimedia.org/wikipedia/en/thumb/5/54/Kungpow.jpg/220px-Kungpow.jpg\",\r\n \"www.youtube.com/watch?v=GXrAYdSeWY8\")\r\n\r\n#create a new instance of the class Movie\r\nairplane = media.Movie(\"Airplane!\",\r\n \"Shenanigans on an airplane that is about to crash\",\r\n \"http://upload.wikimedia.org/wikipedia/en/thumb/f/f5/Airplane%21.jpg/220px-Airplane%21.jpg\",\r\n \"www.youtube.com/watch?v=qaXvFT_UyI8\")\r\n\r\n#create a new instance of the class Movie\r\nthe_room = media.Movie(\"The Room\",\r\n \"Quite possible the worst movie ever made\",\r\n \"http://upload.wikimedia.org/wikipedia/en/thumb/e/e1/TheRoomMovie.jpg/220px-TheRoomMovie.jpg\",\r\n \"www.youtube.com/watch?v=yCj8sPCWfUw\")\r\n\r\n#create a new instance of the class Movie\r\nprimer = media.Movie(\"Primer\",\r\n \"Two friends accidentally invent a time travel machine\",\r\n \"http://upload.wikimedia.org/wikipedia/en/thumb/7/7f/Primer.jpg/220px-Primer.jpg\",\r\n \"www.youtube.com/watch?v=3nj5MMURCm8\")\r\n\r\n#create an array of all the created instances of the class Movie\r\nmovies = [toy_story, avatar, kung_pow, airplane, the_room, primer]\r\n\r\n#call the open_movies_page function by passing the array \"movies\" as an argument\r\nfresh_tomatoes.open_movies_page(movies)\r\n","sub_path":"entertainment_center.py","file_name":"entertainment_center.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"76943372","text":"import xmlrpc.client\nimport json\nimport sys\nimport os\nimport signal\nimport subprocess\nimport time\nfrom mr import mr_hash\n\n\nif __name__ == '__main__':\n config = json.loads( open(\"config.json\").read() )\n mapper_procs = []\n reducer_procs = []\n mappers = []\n reducers = []\n\n # Spawn mapper and reducer processes\n for m in config[\"mapper\"]:\n mproc = subprocess.Popen([\"python\", \"server.py\", m[\"host\"], m[\"port\"]])\n mapper_procs.append(mproc)\n for r in config[\"reducer\"]:\n rproc = subprocess.Popen([\"python\", \"server.py\", r[\"host\"], r[\"port\"]])\n reducer_procs.append(rproc)\n \n # Wait until all the processes have been spawned\n time.sleep(5)\n \n # Connect to mapper and reducer processes\n for m in config[\"mapper\"]:\n mappers.append(xmlrpc.client.ServerProxy('http://'+m[\"host\"]+':'+m[\"port\"]))\n for r in config[\"reducer\"]:\n reducers.append(xmlrpc.client.ServerProxy('http://'+r[\"host\"]+':'+r[\"port\"]))\n \n # Read file content\n with open(sys.argv[1]) as f:\n content_wo_ln = f.readlines()\n content = []\n i = 1\n for line in content_wo_ln:\n content.append((i, line))\n i = i+1\n\n # Run mappers\n chunk = int(len(content)/len(mapper_procs)) + 1\n map_data = []\n for i in range(0, len(config[\"mapper\"])):\n map_data += mappers[i].map(content[i*chunk:(i+1)*chunk])\n\n # Partition map output for reduction\n reduce_partition = []\n for i in range(0, len(config[\"reducer\"])):\n reduce_partition.append([])\n for md in map_data:\n reduce_partition[mr_hash(md[0]) % len(config[\"reducer\"])].append(md)\n \n # Run reducers\n reduced_data = []\n for i in range(0, len(config[\"reducer\"])):\n reduced_data += reducers[i].reduce(reduce_partition[i])\n \n # Write output file\n with open(sys.argv[1]+\".out\", 'w') as f:\n for rd in reduced_data:\n f.write(str(rd[0]) + \" - \" + str(rd[1]) + 
\"\\n\")\n # for rd in reduced_data:\n # print(str(rd[0]) + \" - \" + str(rd[1]) + \"\\n\")\n\n time.sleep(5)\n\n for mproc in mapper_procs:\n mproc.kill()\n for rproc in reducer_procs:\n rproc.kill()\n","sub_path":"Distributed_Systems/assignment_1/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391316658","text":"import socket\nimport logging\n\nfrom . import Controller\n\nfrom PyUniversalRobot.model import CollisionModel\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nclass AntiCollisionController(Controller):\n def __init__(self, *args, **kwargs):\n kinematics = kwargs['kinematics']\n self._collider = kwargs['collider']\n self._slow_margin = kwargs.pop('slow_margin', 0.02)\n self._stop_margin = kwargs.pop('stop_margin', 0.01)\n self._precollision_scale = kwargs.pop('precollision_scale', 0.1)\n\n if self._slow_margin < self._stop_margin:\n raise ValueError('slow_margin ({}) is less than stop_margin ({})'.format(self._slow_margin, self._stop_margin))\n if self._stop_margin < 0:\n raise ValueError('stop_margin ({}) is negative'.format(self._stop_margin))\n if self._precollision_scale > 1 or self._precollision_scale < 0:\n raise ValueError('precollision_scale ({}) is not in [0, 1]'.format(self._precollision_scale))\n\n # insert collision checking filter\n kwargs.setdefault('filters', []).append(self._check)\n\n super(AntiCollisionController, self).__init__(*args, **kwargs)\n\n # initialize collision model\n kwargs['margin'] = self._slow_margin\n self._model = CollisionModel(**kwargs)\n\n self._last_scale = None\n\n def _check(self, state):\n # check for collisions\n self._model.pose(state.target_q)\n collisions = self._collider.check()\n if collisions:\n # find closest collision\n limiting_collision = min(collisions, key=lambda c: c.distance)\n distance = limiting_collision.distance\n\n if distance > 0:\n # no problem due to margin\n self.speed_scale(1)\n elif distance > -(self._slow_margin - self._stop_margin):\n print(limiting_collision)\n if distance > -(self._slow_margin - self._stop_margin) / 2:\n # linear slowdown for first half of slow margin\n self.speed_scale((1 - distance / (-(self._slow_margin - self._stop_margin) / 2)) * (1 - self._precollision_scale) + self._precollision_scale)\n else:\n # fixed slow speed for second half of slow margin\n self.speed_scale(self._precollision_scale)\n else:\n # stuck here in collision\n # NOTE: setting the speed scale to zero suspends the robot controller program so halt instead for now\n self.speed_scale(0.01)\n self.servo(halt=True)\n\n print(limiting_collision, self.speed_scale())\n else:\n # no collision\n self.speed_scale(1)\n","sub_path":"Resources/UR5e_Control_API/PyUniversalRobot-master/src/PyUniversalRobot/control/anticollision.py","file_name":"anticollision.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"43655442","text":"#!/bin/env python\n\"\"\"script to do X\"\"\"\nfrom DIRAC.Core.Base.Script import parseCommandLine\nparseCommandLine()\n\nfrom ILCDIRAC.Interfaces.API.NewInterface.Applications import GaudiApp, DelphesApp\nfrom ILCDIRAC.Interfaces.API.NewInterface.UserJob import UserJob\nfrom ILCDIRAC.Interfaces.API.DiracILC import DiracILC\n\ndef run():\n \"\"\"Run The Job.\"\"\"\n job = UserJob()\n job.setConfigPackage(\"fccConfig\", 'key4hep-devel-2')\n\n ga = GaudiApp()\n 
ga.setVersion('key4hep-latest')\n ga.setExecutableName(\"k4run\")\n ga.setSteeringFile('k4simdelphesalg_pythia.py')\n ga.setPythia8Card('p8_ee_Zbb_ecm91.cmd')\n ga.setExtraCLIArguments(\"--Pythia8.PythiaInterface.pythiacard pythia8card.cmd --k4SimDelphesAlg.DelphesCard card_IDEA.tcl --k4SimDelphesAlg.DelphesOutputSettings edm4hep_IDEA.tcl\")\n ga.setEnergy(91.188)\n ga.setNumberOfEvents(50)\n ga.setOutputFile('output.root')\n\n job.append(ga)\n job.submit(DiracILC(), mode='local')\n\n\nif __name__ ==\"__main__\":\n run()\n","sub_path":"workflows/4/WF4.py","file_name":"WF4.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"85005744","text":"from __future__ import unicode_literals\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom hiku import query\nfrom hiku.graph import Graph, Edge, Field, Link, Option\nfrom hiku.engine import Engine\nfrom hiku.readers.simple import read\nfrom hiku.executors.threads import ThreadsExecutor\n\nfrom .base import TestCase, patch, reqs_eq_patcher\n\n\ndef query_fields1(*args, **kwargs):\n pass\n\n\ndef query_fields2(*args, **kwargs):\n pass\n\n\ndef query_link1(*args, **kwargs):\n pass\n\n\ndef query_link2(*args, **kwargs):\n pass\n\n\ndef _(func):\n def wrapper(*args, **kwargs):\n return globals()[func.__name__](*args, **kwargs)\n return wrapper\n\n\ndef _patch(func):\n return patch('{}.{}'.format(__name__, func.__name__))\n\n\nTEST_ENV = Graph([\n Field('a', _(query_fields1)),\n Field('b', _(query_fields2)),\n Edge('c', [\n Field('d', _(query_fields1)),\n Field('e', _(query_fields2)),\n ]),\n Link('f', _(query_link1), edge='c', requires=None, to_list=True),\n Link('g', _(query_link2), edge='c', requires=None, to_list=True),\n Link('h', _(query_link1), edge='c', requires=None, to_list=True,\n options=[Option('foo')]),\n Link('k', _(query_link1), edge='c', requires=None, to_list=True,\n options=[Option('foo', default=1)]),\n])\n\nthread_pool = ThreadPoolExecutor(2)\n\n\nclass TestEngine(TestCase):\n\n def setUp(self):\n self.engine = Engine(ThreadsExecutor(thread_pool))\n\n def execute(self, query):\n return self.engine.execute(TEST_ENV, read(query))\n\n def testFields(self):\n with _patch(query_fields1) as qf1, _patch(query_fields2) as qf2:\n qf1.return_value = ['a1']\n qf2.return_value = ['b1']\n self.assertResult(self.execute('[:a :b]'), {'a': 'a1', 'b': 'b1'})\n with reqs_eq_patcher():\n qf1.assert_called_once_with([query.Field('a')])\n qf2.assert_called_once_with([query.Field('b')])\n\n def testEdgeFields(self):\n with _patch(query_fields1) as qf1, _patch(query_fields2) as qf2:\n qf1.return_value = ['d1']\n qf2.return_value = ['e1']\n self.assertResult(self.execute('[{:c [:d :e]}]'),\n {'c': {'d': 'd1', 'e': 'e1'}})\n with reqs_eq_patcher():\n qf1.assert_called_once_with([query.Field('d')])\n qf2.assert_called_once_with([query.Field('e')])\n\n def testLinkFields(self):\n with _patch(query_fields1) as qf1, _patch(query_fields2) as qf2,\\\n _patch(query_link1) as ql1:\n ql1.return_value = [1]\n qf1.return_value = [['d1']]\n qf2.return_value = [['e1']]\n result = self.execute('[{:f [:d :e]}]')\n self.assertResult(result, {'f': [{'d': 'd1', 'e': 'e1'}]})\n self.assertEqual(result.idx, {'c': {1: {'d': 'd1', 'e': 'e1'}}})\n with reqs_eq_patcher():\n ql1.assert_called_once_with()\n qf1.assert_called_once_with([query.Field('d')], [1])\n qf2.assert_called_once_with([query.Field('e')], [1])\n\n def testLinks(self):\n with _patch(query_fields1) as qf1, 
_patch(query_fields2) as qf2,\\\n _patch(query_link1) as ql1, _patch(query_link2) as ql2:\n ql1.return_value = [1]\n qf1.return_value = [['d1']]\n ql2.return_value = [2]\n qf2.return_value = [['e1']]\n result = self.execute('[{:f [:d]} {:g [:e]}]')\n self.assertResult(result, {'f': [{'d': 'd1'}], 'g': [{'e': 'e1'}]})\n self.assertEqual(result.idx, {'c': {1: {'d': 'd1'},\n 2: {'e': 'e1'}}})\n with reqs_eq_patcher():\n ql1.assert_called_once_with()\n qf1.assert_called_once_with([query.Field('d')], [1])\n ql2.assert_called_once_with()\n qf2.assert_called_once_with([query.Field('e')], [2])\n\n def testFieldOptions(self):\n with _patch(query_fields1) as qf1:\n qf1.return_value = ['a1']\n result = self.execute('[(:a {:foo \"bar\"})]')\n self.assertResult(result, {'a': 'a1'})\n with reqs_eq_patcher():\n qf1.assert_called_once_with([\n query.Field('a', options={'foo': 'bar'}),\n ])\n\n def testLinkOption(self):\n with _patch(query_link1) as ql1, _patch(query_fields1) as qf1:\n ql1.return_value = [1]\n qf1.return_value = [['d1']]\n result = self.execute('[{(:h {:foo 5}) [:d]}]')\n self.assertResult(result, {'h': [{'d': 'd1'}]})\n with reqs_eq_patcher():\n ql1.assert_called_once_with({'foo': 5})\n qf1.assert_called_once_with([query.Field('d')], [1])\n\n def testLinkOptionDefault(self):\n with _patch(query_link1) as ql1, _patch(query_fields1) as qf1:\n ql1.return_value = [1]\n qf1.return_value = [['d1']]\n result = self.execute('[{:k [:d]}]')\n self.assertResult(result, {'k': [{'d': 'd1'}]})\n with reqs_eq_patcher():\n ql1.assert_called_once_with({'foo': 1})\n qf1.assert_called_once_with([query.Field('d')], [1])\n\n def testLinkOptionUnknown(self):\n with _patch(query_link1) as ql1, _patch(query_fields1) as qf1:\n ql1.return_value = [1]\n qf1.return_value = [['d1']]\n result = self.execute('[{(:k {:foo 2 :bar 3}) [:d]}]')\n self.assertResult(result, {'k': [{'d': 'd1'}]})\n with reqs_eq_patcher():\n ql1.assert_called_once_with({'foo': 2})\n qf1.assert_called_once_with([query.Field('d')], [1])\n","sub_path":"tests/test_engine.py","file_name":"test_engine.py","file_ext":"py","file_size_in_byte":5713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"220220886","text":"\n\n#6.5 Write code using find() and string slicing (see section 6.10) \n#to extract the number at the end of the line below. \n#Convert the extracted value to a floating point number and print it out.\n#rstrip()\n#startwith(\"dgdgdgdgdg\") for string object\ntext = \"X-DSPAM-Confidence: 0.8475\";\ntext_number=text.find(\" \")\nprint(float(text[text_number+1: len(text)]))\n\n\n","sub_path":"Python_Data_Structures/Assignement_Ch6.py","file_name":"Assignement_Ch6.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129491018","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server import util\n\n\nclass BadResponse(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, command: str=None, type: str=None, description: str=None): # noqa: E501\n \"\"\"BadResponse - a model defined in Swagger\n\n :param command: The command of this BadResponse. # noqa: E501\n :type command: str\n :param type: The type of this BadResponse. 
# noqa: E501\n :type type: str\n :param description: The description of this BadResponse. # noqa: E501\n :type description: str\n \"\"\"\n self.swagger_types = {\n 'command': str,\n 'type': str,\n 'description': str\n }\n\n self.attribute_map = {\n 'command': 'command',\n 'type': 'type',\n 'description': 'description'\n }\n self._command = command\n self._type = type\n self._description = description\n\n @classmethod\n def from_dict(cls, dikt) -> 'BadResponse':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The BadResponse of this BadResponse. # noqa: E501\n :rtype: BadResponse\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def command(self) -> str:\n \"\"\"Gets the command of this BadResponse.\n\n type of results: ok or error # noqa: E501\n\n :return: The command of this BadResponse.\n :rtype: str\n \"\"\"\n return self._command\n\n @command.setter\n def command(self, command: str):\n \"\"\"Sets the command of this BadResponse.\n\n type of results: ok or error # noqa: E501\n\n :param command: The command of this BadResponse.\n :type command: str\n \"\"\"\n\n self._command = command\n\n @property\n def type(self) -> str:\n \"\"\"Gets the type of this BadResponse.\n\n short description of the error # noqa: E501\n\n :return: The type of this BadResponse.\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type: str):\n \"\"\"Sets the type of this BadResponse.\n\n short description of the error # noqa: E501\n\n :param type: The type of this BadResponse.\n :type type: str\n \"\"\"\n\n self._type = type\n\n @property\n def description(self) -> str:\n \"\"\"Gets the description of this BadResponse.\n\n Brief description of the operation result # noqa: E501\n\n :return: The description of this BadResponse.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description: str):\n \"\"\"Sets the description of this BadResponse.\n\n Brief description of the operation result # noqa: E501\n\n :param description: The description of this BadResponse.\n :type description: str\n \"\"\"\n\n self._description = description\n","sub_path":"DockerHealthMonitorService/health_service/health_interface/swagger_server/models/bad_response.py","file_name":"bad_response.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"4802784","text":"from flask import Flask, render_template, url_for, request, jsonify, session\nimport requests\nfrom clean_message import Pretreatment\nfrom logs.historique import Historisation\nimport json\nfrom urllib.parse import unquote # nécessite un PIP install en prod\n\nimport datetime # pour générer les timestamps de chatlogs\nimport uuid # créer des strings aléatoires pour les noms des fichiers de chatlogs\nimport csv # pour créer les fichiers de chatlogs\n\nmessage_cleaner = Pretreatment()\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'ChatbotApplication'\n\n@app.route('/')\ndef index():\n #Chargement de l'historique si il existe\n if 'username' in session:\n historique_chat = Historisation.load_historique(session)\n return render_template('index.html', historique_chat=historique_chat) #Pour le chatbot seul\n # return render_template('index_test.html', historique_chat=historique_chat) #Pour le chatbot intégré au site\n\n #Créer un fichier historique sinon\n else:\n Historisation.create_historique(session)\n return render_template('index.html') #Pour le chatbot 
seul\n # return render_template('index_test.html') #Pour le chatbot intégré au site\n\n@app.route('/pretreatment', methods=['GET','POST'])\ndef pretreatment():\n if request.method == 'POST':\n message = request.form['jsdata']\n message = unquote(message)\n\n # Envoi de la réponse de user au chatlog :\n Historisation.save_message(session, 'user', message)\n\n processed_message = message_cleaner.pretreatment(message)\n return json.dumps(processed_message.tolist())\n\n\n@app.route('/get_tag', methods=['GET', 'POST'])\ndef get_tag():\n if request.method == 'POST':\n output_int = int(request.form['jsdata'])\n print(output_int)\n tag = message_cleaner.inverse_labelencoding(output_int)\n\n url = f\"http://api:5000/chatbot/get_tag_output_dic?tag={tag}\" #Pour Docker\n # url = f\"http://localhost:5000/chatbot/get_tag_output_dic?tag={tag}\" #Sans Docker \n\n output_reponse = requests.get(url).json()['liste_output'][0]\n \n # Envoi de la réponse du chatbot au chatlog :\n if output_reponse != None :\n Historisation.save_message(session, 'chatbot', output_reponse)\n\n return json.dumps(output_reponse) \n\n\nif __name__ == '__main__':\n app.run(port=5001, debug=True)\n\n\n\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"321668359","text":"#bac,cab,abc are all permutations of abc\nimport collections\nimport unittest\n\ndef permutation(s1, s2):\n\t\n\tif len(s1) != len(s2):\n\t\treturn False\n\n\ts1_letters = collections.Counter(s1)\n\ts2_letters = collections.Counter(s2)\n\n\tif s1_letters == s2_letters:\n\t\treturn True\n\n\treturn False\n\t\n\nclass Test(unittest.TestCase):\n\n\tdataT = [(\"ronan\",\"nanor\"),(\"abcd0\",\"0acbd\"),(\" \", \" \")]\n\tdataF = [(\"ronan\",\"ronaq\"),(\"gareth\",\"garets\")]\n\n\tdef test_permutation(self):\n\t\t#true check\n\t\tfor data in self.dataT:\n\t\t\tactual = permutation(*data)\n\t\t\tself.assertTrue(actual)\n\t\t#False check\n\t\tfor data in self.dataF:\n\t\t\tactual = permutation(*data)\n\t\t\tself.assertFalse(actual)\n\nif __name__ == \"__main__\":\n\tunittest.main()\n\n\n\n\n\n\n\n\t \n\n","sub_path":"Python_Solutions/Arrays and Strings/permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"610160714","text":"# 461. 
Hamming Distance\n\n# The Hamming distance between two integers is the number of positions at which the corresponding bits are different.\n\n# Given two integers x and y, calculate the Hamming distance.\n\n# Note:\n# 0 ≤ x, y < 231.\n\n# Example:\n\n# Input: x = 1, y = 4\n\n# Output: 2\n\n# Explanation:\n# 1 (0 0 0 1)\n# 4 (0 1 0 0)\n# ↑ ↑\n\n# The above arrows point to positions where the corresponding bits are different.\n\nclass HammingDistance:\n def doit(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n distance = 0 \n while x + y != 0:\n\n if x & 1 != y & 1:\n distance += 1\n\n x >>= 1\n y >>= 1\n\n return distance\n\n \n def doit1(self, x, y):\n\n a = bin(x)[2:].zfill(32)\n b = bin(y)[2:].zfill(32)\n\n return sum([1 if a[i] != b[i] else 0 for i in range(32)])\n\n # n=0\n # i=0\n # a=bin(x)[2:].zfill(32)\n # b=bin(y)[2:].zfill(32)\n # for i in range(32):\n # if a[i]!=b[i]:\n # n+=1 \n # return n\n ans = 0\n while x + y != 0:\n if x % 2 != y % 2:\n ans += 1\n x >>= 1\n y >>= 1\n\n return ans\n\n\n def doit2(self, x, y):\n r = x ^ y;\n result = 0\n \n while r!=0:\n r = r & (r-1) \n result += 1\n\n return result\n\n\nif __name__ == \"__main__\":\n\n res = HammingDistance().doit(1, 4)\n\n","sub_path":"PythonLeetcode/LeetCodeE/461_HammingDistance.py","file_name":"461_HammingDistance.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"422517294","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlretrieve\nfrom shutil import rmtree\nfrom os import path, mkdir, getcwd, listdir,unlink\nfrom img2pdf import convert\nimport threading\nimport re\nfrom tqdm import tqdm\n\n\n\nprint('''\\n\n--------------------------------------------------------------------------------------------------\n--------------------------------------------------------------------------------------------------\n\n---------------------------------LECTURE NOTES DOWNLOADER----------------------------------------- \n--------------------------------------------------------------------------------------------------\n---------------------------------------USER GUIDE-------------------------------------------------\n\nnotes id in url is string you get after \"https://lecturenotes.in/notes/\" and before \"/\"\nexample: if download url is \" https://lecturenotes.in/notes/12168-programming-in-c/ \"\n\nenter only \" 12168-programming-in-c \" as notes id \n----------------------------------------------------------------------------------------------------\n\n''')\n\n\n\ndef sorted_aphanumeric(data):\n def convert(text): return int(text) if text.isdigit() else text.lower()\n\n def alphanum_key(key): return [convert(c)\n for c in re.split('([0-9]+)', key)]\n return sorted(data, key=alphanum_key)\n\ndef getter(url, dest):\n urlretrieve(url, dest)\n\n\n\nthreads = []\nbase_url = 'https://lecturenotes.in/'\ninput_url = ''\ntemp_dir = path.join(getcwd(), \"images\")\n\ninput_url=input('Enter the notes id : ')\ninput_url=input_url.strip()\nurl = base_url+'notes/'+input_url+'/1'\n\npage = requests.get(url)\n\nsoup = BeautifulSoup(page.content, 'lxml')\ntotal_page =soup.find('span', class_='total_page')\ntotal_page =int(total_page.string.replace(\"/\", \"\").strip())\n\n\ntry:\n mkdir('images')\nexcept:\n rmtree(temp_dir)\n mkdir('images')\n \n \n\nfor i in tqdm(range(1, total_page+1)):\n url = base_url+'notes/'+input_url+'/{}'.format(i)\n page = requests.get(url)\n soup = 
BeautifulSoup(page.content, 'lxml')\n div = soup.find('div', class_='pic')\n image_url = base_url+div['style'].strip().split('url(')[1].split(');')[0]\n image_loc = path.join(temp_dir, \"page{}.jpg\".format(i))\n\n t = threading.Thread(target=getter, args=(str(image_url),str(image_loc)))\n threads.append(t)\n t.start()\n \n\nfor t in threads:\n t.join()\n\n\n\nimage = []\nfor i in sorted_aphanumeric(listdir(temp_dir)):\n image.append(path.join(temp_dir, i))\n\n\n\nwith open(\"ThankYouKiitClub.pdf\", \"wb\") as f:\n f.write(convert(image))\ntry:\n rmtree(temp_dir)\n\nexcept :\n unlink(temp_dir)\n rmtree(temp_dir)\n\n\nprint('''\\n\n------------------------------------Process Completed-----------------------------------------------\n---------------------------------LECTURE NOTES DOWNLOADER----------------------------------------- \n--------------------------------------------------------------------------------------------------\n''')\n","sub_path":"downloader/notes.py","file_name":"notes.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"205741531","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n===============================================================\nauthor:xieqiqi\nemail:xieqiqi@jd.com\ndate:2018\nintroduction:\n Seq2seq for Nnm!\n Not batch!\n===============================================================\n\"\"\"\nimport random\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport math\n#=================================================================\n#=============================Encoders============================\n#=================================================================\nclass Encoder_no_batch(torch.nn.Module):\n '''\n not for batch!\n '''\n def __init__(self,Config):\n super(Encoder_no_batch,self).__init__()\n self.use_cuda=Config.use_cuda\n #Parameters And Base Structers!\n self.vocab_size=Config.encoder_vocab_size\n self.hidden_dim=Config.encoder_hidden_dim\n self.embed_dim=Config.encoder_embed_dim\n self.num_layers=Config.encoder_num_layers\n\n self.embedings=torch.nn.Embedding(self.vocab_size,self.embed_dim)\n self.gru=torch.nn.GRU(self.embed_dim,self.hidden_dim)\n\n def forward(self,input,hidden=None):\n '''\n 以单个wd_id作为基本输入单位,不是一个batch,也不是一整句话!\n :param input:\n :param hidden:\n :return:\n '''\n wd_embed=self.embedings(input).view(1,1,-1)#变成一个tensor(3D):[1,1,embed_dim]\n #开始gru运算!\n output=wd_embed\n for layer in range(self.num_layers):\n output,hidden=self.gru(output,hidden)\n #ready for next time step!\n return output,hidden\n\n # def hidden_init(self):\n # '''\n # Encoder hidden initialization!\n # :return:\n # '''\n # if self.use_cuda:\n # return torch.zeros(1,1,self.hidden_dim).cuda()\n # else:\n # return torch.zeros(1,1,self.hidden_dim)\n\nclass Encoder_for_batch(torch.nn.Module):\n '''\n Support for batch!\n '''\n def __init__(self,Config):\n super(Encoder_for_batch,self).__init__()\n self.use_cuda=Config.use_cuda\n #Parameters And Base Structers!\n self.vocab_size=Config.encoder_vocab_size\n self.hidden_dim=Config.encoder_hidden_dim\n self.embed_dim=Config.encoder_embed_dim\n self.num_layers=Config.encoder_num_layers\n\n self.embedings=torch.nn.Embedding(self.vocab_size,self.embed_dim)\n self.gru=torch.nn.GRU(self.embed_dim,self.hidden_dim)\n\n def forward(self,input_batch,hidden=None):\n '''\n 因为不需要得到每一个时间step的gru计算的结果,所以input长度为max即可!\n :param input:B*T\n :param hidden:\n :return:\n '''\n 
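        # --- Editor's note (ours, hedged): the transpose below converts the
        # batch-first (B, T, E) embedding output to the time-first (T, B, E)
        # layout that torch.nn.GRU expects with its default batch_first=False.
        # Quick shape check, assuming B=2, T=5, E=8:
        #   emb = torch.nn.Embedding(10, 8)
        #   ids = torch.zeros(2, 5, dtype=torch.long)             # B*T word ids
        #   assert emb(ids).shape == (2, 5, 8)                    # B*T*E
        #   assert emb(ids).transpose(0, 1).shape == (5, 2, 8)    # T*B*E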
wd_embed=self.embedings(input_batch).transpose(0, 1)#B*T*embed_dim->T*B*embed_dim\n #开始gru运算!\n output=wd_embed\n for layer in range(self.num_layers):\n output,hidden=self.gru(output,hidden)\n #hidden:?*B*H——>(num_layers * num_directions, batch, hidden_size)\n #outPut:T*B*H——>(seq_len, batch, hidden_size * num_directions)\n return output,hidden\n#=================================================================\n#=============================Attention Mechanism=================\n#=================================================================\n\nclass Attention_Bahdanau_for_batch(torch.nn.Module):\n '''\n Suport for Batch!\n 实现Bahdanau的AM!\n 根据hidden和encoder_outPuts,输出attention的weight:aij(softmax的结果!)\n '''\n def __init__(self,hidden_dim):\n super(Attention_Bahdanau_for_batch,self).__init__()\n # Parameters And Base Structers!\n self.hidden_dim=hidden_dim\n #AM_base_structures!\n # 论文中所说的使用前馈神经网络来计算score(i,j)!输入si-1,与hj,batch运算则是输入cat[si-1(expand T),encoder outputs(T)]\n #输入:B*T*2H->输出:B*T*H\n self.attn=torch.nn.Linear(2*self.hidden_dim,self.hidden_dim)\n #计算score还需要Va!使用nn.Parameter()将其变成Modul的属性,自动加入Parameters的迭代器中!\n #Va随机初始化,并且经过归一化!\n self.v=torch.nn.Parameter(torch.rand(self.hidden_dim))\n bound=1./(math.sqrt(self.v.size(0)))\n self.v.data.uniform_(-bound,bound)\n\n def forward(self,hidden,encoder_outPuts):\n '''\n :param hidden:1*B*H——>(num_layers * num_directions, batch, hidden_size)\n :param encoder_outPuts: T*B*H——>(seq_len, batch, hidden_size * num_directions)\n :return:\n '''\n T=encoder_outPuts.size(0)\n #上一时刻的hidden——>copyT份(T*B*H)\n h=hidden.repeat(T,1,1)#T*B*H\n #计算attention_weight:Va*atten()\n #B*T*2H->B*T*H\n #encoder_outPuts.t:B*T*H——h.t:B*T*H——在第二维cat(h.t,encoder_outPuts,dim=2)\n energy=torch.cat((h.transpose(0,1),encoder_outPuts.transpose(0,1)),dim=2)\n #batch运算,需要对Va——copy B份,即每一句话都对应一个权重Va!\n Va=self.v.repeat(energy.size(0),1).unsqueeze(1)#B*1*H\n #B*1*H_B*H*T=B*1*T\n score=torch.bmm(Va,energy.transpose(1,2))#B*1*T\n weights=F.softmax(score.squeeze(1))#B*T\n return weights\n\nclass Attention_Luong_no_batch(torch.nn.Module):\n '''\n Global Attenion Mechanism!\n 输出:Attention weight\n '''\n def __init__(self,hidden_dim,method):\n super(Attention_Luong_no_batch,self).__init__()\n self.use_cuda=torch.cuda.is_available()\n # Parameters And Base Structers!\n self.hidden_dim=hidden_dim\n self.method=method\n # AM_base_structures!\n if(self.method==\"general\"):\n self.attn=torch.nn.Linear(self.hidden_dim,self.hidden_dim)\n elif(self.method==\"concat\"):\n self.attn=torch.nn.Linear(self.hidden_dim*2,self.hidden_dim)\n #Va\n self.v = torch.nn.Parameter(torch.rand(1,self.hidden_dim))\n bound = 1. 
/ (math.sqrt(self.v.size(1)))\n self.v.data.uniform_(-bound, bound)\n\n def forward(self,hidden,encoder_outputs):\n '''\n :param hidden:[1,1,H]——当前时刻的隐层state\n :param encoder_outputs:[seqlen,1,H]\n :return:\n '''\n seq_len=encoder_outputs.size(0)\n scores=Variable(torch.zeros(seq_len))#seq_len\n scores=scores.cuda() if self.use_cuda else scores\n\n for i in range(seq_len):\n scores[i]=self.score(hidden,encoder_outputs[i])\n #[seqLen]->[1,1,segLen]\n weights=F.softmax(scores).unsqueeze(0).unsqueeze(0)#[1,1,segLen]\n return weights\n\n def score(self,hidden,encoder_output):\n '''\n :param hidden:\n :param encoder_outputs:\n :return:\n '''\n if(self.method==\"dot\"):\n #ht.hs\n return hidden.dot(encoder_output)\n elif(self.method==\"general\"):\n #ht.W.hs\n return hidden.dot(self.attn(encoder_output))\n elif(self.method==\"concat\"):\n #ht:[1,1,H],hs[1,1,H]\n #Va.W.cat[ht;hs]\n return self.v.unsqueeze(0).dot(self.attn(torch.cat((hidden,encoder_output),dim=2)))\n\nclass Attention_Luong_for_batch(torch.nn.Module):\n '''\n Global Attenion Mechanism!\n Support For Batch!\n 输出:Attention weight!\n '''\n def __init__(self,hidden_dim,method):\n super(Attention_Luong_for_batch,self).__init__()\n self.use_cuda=torch.cuda.is_available()\n # Parameters And Base Structers!\n self.hidden_dim=hidden_dim\n self.method=method\n # AM_base_structures!\n if(self.method==\"general\"):\n self.attn=torch.nn.Linear(self.hidden_dim,self.hidden_dim)\n elif(self.method==\"concat\"):\n self.attn=torch.nn.Linear(self.hidden_dim*2,self.hidden_dim)\n #Va\n self.v = torch.nn.Parameter(torch.rand(1,self.hidden_dim))\n # self.v = torch.nn.Parameter(torch.FloatTensor(1,hidden_dim))\n bound = 1. / (math.sqrt(self.v.size(1)))\n self.v.data.uniform_(-bound, bound)\n\n def forward(self,hidden,encoder_outputs):\n '''\n :param hidden:[1,B,H]——当前时刻的隐层state\n :param encoder_outputs:[seqlen,B,H]\n :return:\n '''\n seq_lens=encoder_outputs.size(0)\n this_batch_size=encoder_outputs.size(1)\n scores=Variable(torch.zeros(this_batch_size,seq_lens))#this_batch_size*seq_len\n scores=scores.cuda() if self.use_cuda else scores\n for btz in range(this_batch_size):\n for len in range(seq_lens):\n scores[btz,len] = self.score(hidden[:,btz], encoder_outputs[len,btz].unsqueeze(0))\n #[seqLen]->[1,B,segLen]\n #weights=F.softmax(scores).unsqueeze(0)#[1,B,segLen]\n return F.softmax(scores).unsqueeze(0)\n\n def score(self,hidden,encoder_output):\n '''\n :param hidden:\n :param encoder_outputs:\n :return:\n '''\n if(self.method==\"dot\"):\n #ht.hs\n return hidden.dot(encoder_output)\n elif(self.method==\"general\"):\n #ht.W.hs\n return hidden.squeeze(0).dot(self.attn(encoder_output).squeeze(0))\n elif(self.method==\"concat\"):\n #ht:[1,H],hs[1,H]\n #Va.W.cat[ht;hs]\n tmp=self.attn(torch.cat((hidden,encoder_output),dim=1))\n #\n return self.v.squeeze(0).dot(tmp.squeeze(0))\n#=================================================================\n#=============================Decoders============================\n#=================================================================\nclass Decoder_no_batch_no_Am(torch.nn.Module):\n '''\n not for batch\n Decoder without AM!\n 1:修正理解上的一个错误!context vector 在Decoder中的使用!\n 2:按道理来说Decoder的初始hidden是context vector——不用初始化咯!\n '''\n def __init__(self,Config):\n super(Decoder_no_batch_no_Am,self).__init__()\n self.use_cuda=Config.use_cuda\n #Paremeters and base structures!\n self.vocab_size=Config.decoder_vocab_size\n self.hidden_dim=Config.decoder_hidden_dim\n self.embed_dim=Config.decoder_embed_dim\n 
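        # --- Editor's note (ours): in this Bahdanau-style decoder the GRU
        # defined below takes embed_dim + hidden_dim inputs because each step
        # consumes the previous word embedding concatenated with the attention
        # context vector, s_t = GRU(cat[Emb(y_{t-1}), c_t], s_{t-1}); the
        # output layer is 2*hidden_dim wide for the same reason, since the
        # prediction reads cat[s_t, c_t].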
self.num_layers=Config.decoder_num_layers\n\n self.embedings=torch.nn.Embedding(self.vocab_size,self.embed_dim)\n self.gru=torch.nn.GRU(self.embed_dim,self.hidden_dim)\n self.out=torch.nn.Linear(self.hidden_dim,self.vocab_size)\n\n def forward(self,input,hidden):\n '''\n :param input:\n :param hidden:\n :return:\n '''\n #wd_embeding\n wd_embed=self.embedings(input).view(1,1,-1)#变成Tensor:3D!\n #input——>relu\n outPut=F.relu(wd_embed)\n #gru运算\n for layer in range(self.num_layers):\n outPut,hidden=self.gru(outPut,hidden)\n #输出——————————注意: 如果使用log_softmax,则要一定使用torch.nn.NLLLoss损失!如果使用softmax,则使用cross_entropy损失!\n outPut=F.log_softmax(self.out(outPut))\n #\n return outPut,hidden\n\nclass Decoder_Bahdanau_AM_for_batch(torch.nn.Module):\n '''\n Support for batch!\n '''\n def __init__(self,Config):\n super(Decoder_Bahdanau_AM_for_batch,self).__init__()\n self.use_cuda=Config.use_cuda\n #Paremeters and base structures!\n self.vocab_size=Config.decoder_vocab_size\n self.hidden_dim=Config.decoder_hidden_dim\n self.embed_dim=Config.decoder_embed_dim\n self.num_layers=Config.decoder_num_layers\n self.droupOut_p=Config.decoder_droupOut_p\n\n self.embedings=torch.nn.Embedding(self.vocab_size,self.embed_dim)\n self.droupOut=torch.nn.Dropout(self.droupOut_p)\n self.gru=torch.nn.GRU(self.embed_dim+self.hidden_dim,self.hidden_dim)\n self.out=torch.nn.Linear(self.hidden_dim*2,self.vocab_size)\n self.AM=Attention_Bahdanau_for_batch(hidden_dim=self.hidden_dim)\n\n def forward(self,input_batch,hidden_prev,encoder_outPuts):\n '''\n 因为是Batch training,所以和一句话的forward不同,hidden——hidden_prev!\n :param input_batch(current input word or last output word)(id):[B]:1D一维的!\n :param hidden_prev:1*B*H——>(???)\n :param encoder_outPuts:T*B*H\n :return:\n '''\n #1:embedding!\n embed_batch=self.embedings(input_batch).unsqueeze(0)#B*H->1*B*H\n #2:embedding->droupOut!\n embed_batch = self.droupOut(embed_batch)\n #3:attention weights\n atten_weight=self.AM(hidden_prev,encoder_outPuts).unsqueeze(1)#B*T->B*1*T\n #4:ct:encoder_outPuts:T*B*H_atten_weight:B*1*T\n context_vector=torch.bmm(atten_weight,encoder_outPuts.transpose(0,1)).transpose(0,1)#B*1*H->1*B*H\n #5:计算St:gru计算!f(St-1,cat[yi-1,ci])\n #注意每一个batch只进行一次GRU计算,注意区分非batch的循环计算!\n rnn_input=torch.cat((embed_batch,context_vector),dim=2).transpose(0,1)#1*B*2H\n #rnn_input:1*B*2H\n #hidden:1*B*H——>(num_layers * num_directions, batch, hidden_size)\n #outPut:1*B*H——>(seq_len, batch, hidden_size * num_directions)\n outPut=rnn_input\n hidden=hidden_prev\n # for layer in range(self.num_layers-1):\n # outPut,hidden=self.gru(outPut,hidden)\n outPut, hidden = self.gru(outPut, hidden)\n #6:计算预测y:y=out(cat[gru_out_t,ct])\n outPut=outPut.squeeze(0)#1*B*H->B*H\n context_vector=context_vector.squeeze(0)#1*B*H->B*H\n outs=self.out(torch.cat((outPut,context_vector),dim=1))#B*2H->B*vocab_size\n #7:logits—>B*vocab_size\n logits=F.log_softmax(outs)\n return logits,hidden,atten_weight\n\nclass Decoder_Luong_AM_no_batch(torch.nn.Module):\n '''\n 基于Luong的G_AM的Decoder的实现!\n Not support for batch!\n '''\n def __init__(self,Config):\n super(Decoder_Luong_AM_no_batch,self).__init__()\n self.use_cuda=Config.use_cuda\n #Paremeters and base structures!\n self.vocab_size=Config.decoder_vocab_size\n self.hidden_dim=Config.decoder_hidden_dim\n self.embed_dim=Config.decoder_embed_dim\n self.num_layers=Config.decoder_num_layers\n self.droupOut_p=Config.decoder_droupOut_p\n self.method=Config.decoder_method\n\n self.embedings=torch.nn.Embedding(self.vocab_size,self.embed_dim)\n 
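        # --- Editor's note (hedged sketch, ours): the attention decoders in
        # this file all form the context vector with a batched matrix multiply
        # of attention weights against encoder outputs. Shape check, assuming
        # B=2, T=5, H=4:
        #   w = torch.rand(2, 1, 5)                  # B*1*T attention weights
        #   enc = torch.rand(5, 2, 4)                # T*B*H encoder outputs
        #   ctx = torch.bmm(w, enc.transpose(0, 1))  # (B,1,T) x (B,T,H)
        #   assert ctx.shape == (2, 1, 4)            # B*1*H context vector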
self.droupOut=torch.nn.Dropout(self.droupOut_p)\n self.gru_he=torch.nn.GRU(self.embed_dim+self.hidden_dim,self.hidden_dim)\n self.gru_me = torch.nn.GRU(self.embed_dim,self.hidden_dim)\n self.out=torch.nn.Linear(self.hidden_dim*2,self.vocab_size)\n self.AM=Attention_Luong_no_batch(hidden_dim=self.hidden_dim,method=self.method)\n\n def forward(self,input,hidden,encoder_outputs):\n '''\n Not support for batch!\n :param input(id):[1]_1D\n :param hidden:1*1*H\n :param encoder_outputs:seq_len*1*H\n :return:\n '''\n #1:embeding!\n wd_embed=self.embedings(input).view(1,1,-1)#1*1*H\n wd_embed=self.droupOut(wd_embed)\n #2:计算当前时刻隐层输出!论文中我的理解是:gru(yt-1,ht-1)!\n outPut=wd_embed\n outPut,hidden=self.gru_me(outPut,hidden)\n #但是看有人是这么实现的gru(cat(yt - 1, last_Context_Vector), ht - 1)\n # outPut=torch.cat((wd_embed,hidden),dim=2)#1*1*2H\n # outPut,hidden=self.gru_he(outPut,hidden)#[1,1,H],[1,1,H]\n #3:计算contex tVector\n #我根据论文理解:这里计算attn_weights使用的是当前时刻的隐层state,而不是当前时刻hidden输出!\n attn_weights=self.AM(hidden,encoder_outputs)#1*1*seqLen\n #但是有人是使用当前时刻gru输出来计算的!????我感觉我是对的!\n # attn_weights=self.AM(outPut,encoder_outputs)#1*1*segLen\n #context_vector[1,1,H]:attn_weights x encoder_outPuts.transpose(0,1)\n context_vector=torch.bmm(attn_weights,encoder_outputs.transpose(0,1))#1*1*H\n #4:计算yt——out!\n #根据gru的当前时刻的输出和context_vector,预测yt!\n out=self.out(torch.cat((context_vector,outPut),dim=2))#1*1*vocab_size\n #logits\n logits=F.log_softmax(out.squeeze(0))#1*vocab_size\n #\n return logits,hidden,context_vector,attn_weights\n\nclass Decoder_Luong_AM_for_batch(torch.nn.Module):\n '''\n 基于Luong的G_AM的Decoder的实现!\n Support for batch!\n '''\n def __init__(self,Config):\n super(Decoder_Luong_AM_for_batch,self).__init__()\n self.use_cuda=Config.use_cuda\n #Paremeters and base structures!\n self.vocab_size=Config.decoder_vocab_size\n self.hidden_dim=Config.decoder_hidden_dim\n self.embed_dim=Config.decoder_embed_dim\n self.num_layers=Config.decoder_num_layers\n self.droupOut_p=Config.decoder_droupOut_p\n self.method=Config.decoder_method\n\n self.embedings=torch.nn.Embedding(self.vocab_size,self.embed_dim)\n self.droupOut=torch.nn.Dropout(self.droupOut_p)\n self.gru_he=torch.nn.GRU(self.embed_dim+self.hidden_dim,self.hidden_dim)\n self.gru_me = torch.nn.GRU(self.embed_dim,self.hidden_dim)\n self.out=torch.nn.Linear(self.hidden_dim*2,self.vocab_size)\n self.AM=Attention_Luong_for_batch(hidden_dim=self.hidden_dim,method=self.method)\n\n def forward(self,input_batch,hidden_prev,encoder_outputs):\n '''\n Not support for batch!\n :param input_batch(id):[B]:1D一维的!\n :param hidden_prev:1*B*H\n :param encoder_outputs:seq_len*B*H\n :return:\n '''\n #1:embedding!\n # if(self.use_cuda):\n # embed_batch = self.embedings(input_batch).unsqueeze(0).cuda() # B*H->1*B*H\n # else:\n # embed_batch = self.embedings(input_batch).unsqueeze(0) # B*H->1*B*H\n embed_batch = self.embedings(input_batch).unsqueeze(0) # B*H->1*B*H\n #2:embedding->droupOut!\n embed_batch = self.droupOut(embed_batch)\n outPut=embed_batch\n outPut,hidden=self.gru_me(outPut,hidden_prev)#[1,B,H],[1,B,H]\n #3:计算context Vector\n attn_weights=self.AM(hidden,encoder_outputs)#1*B*seqLen\n #context_vector[B,1,H]:attn_weights.transpose(0,1) x encoder_outPuts.transpose(0,1)\n context_vector=torch.bmm(attn_weights.transpose(0,1),encoder_outputs.transpose(0,1))#B*1*H\n #4:计算yt——out!\n #根据gru的当前时刻的输出和context_vector,预测yt!\n out=self.out(torch.cat((context_vector,outPut.transpose(0,1)),dim=2))#B*1*2H->B*1*vocab_size\n #logits\n # logits=F.log_softmax(out.squeeze(1))#B*vocab_size\n 
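        # --- Editor's note (ours): the log_softmax above is left commented
        # out, so this decoder returns raw logits. As the comment in
        # Decoder_no_batch_no_Am notes, log-probabilities pair with
        # torch.nn.NLLLoss; raw logits instead pair with
        # torch.nn.CrossEntropyLoss, which applies log_softmax internally, so
        # feeding log_softmax output to CrossEntropyLoss would normalize
        # twice. Equivalence sketch:
        #   logits = torch.rand(2, 10)                     # B*vocab_size
        #   target = torch.tensor([3, 7])
        #   a = torch.nn.CrossEntropyLoss()(logits, target)
        #   b = torch.nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
        #   assert torch.allclose(a, b)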
out=out.squeeze(1)\n #\n return out,hidden,attn_weights\n#=================================================================\n#============================Seq2Seq Main Structure!==============\n#=================================================================\nclass Seq2seq_all_for_batch(torch.nn.Module):\n '''\n Seq2Seq的main 入口!\n 输入:source_batch:[B,seqLen],target_batch:[B,seqLen]\n 输出:[B,seqLen,target_vocab_size]该batch每一个的预测输出的logts矩阵!\n '''\n def __init__(self,encoder,decoder,Config):\n super(Seq2seq_all_for_batch,self).__init__()\n self.use_cuda = Config.use_cuda\n # Paremeters and base structures!\n self.encoder=encoder\n self.decoder=decoder\n self.teacher_forcing_ratio=Config.teacher_forcing_ratio\n\n def forward(self, source_batch,target_batch):\n '''\n :param source_batch:B*seqLen\n :param target_batch:B*seqLen\n '''\n #BatchSize\n bsz=source_batch.size(0)\n #确定decoder需要的最大时间step!\n target_max_len=target_batch.size(1)\n #target_vocab_size\n target_vocab_size=self.decoder.vocab_size\n #确定输出的维度![seqLen,B,target_vocab_size]\n outPuts=Variable(torch.zeros(target_max_len,bsz,target_vocab_size))\n #encoder\n #hidden:?*B*H——>(num_layers * num_directions, batch, hidden_size)\n #encoder_outPuts:T*B*H——>(seq_len, batch, hidden_size * num_directions)\n encoder_outPuts,hidden=self.encoder(source_batch)#hidden=None\n #decoder:注意Batch形式每一次input的维度[B]\n #初始化decoder的hidden和第一次input!\n hidden=hidden[:self.encoder.num_layers]\n outPut=target_batch[:,0]#SOS作为decoder的第一个输入!\n for len in range(1,target_max_len):\n #outPut:B*vocab_size\n outPut,hidden,_=self.decoder(outPut,hidden,encoder_outPuts)\n outPuts[len]=outPut\n #Prepare for next time step!\n #TODO: Scheduled Sampling只能在train的时候使用!eval的时候不用 Scheduled Sampling和TeacherForcing!\n is_teacher= random.random() > self.teacher_forcing_ratio\n top1=outPut.data.max(1)[1]#取index!\n if(self.use_cuda):\n outPut = Variable(target_batch[:, len].data if is_teacher else top1).cuda()\n else:\n outPut = Variable(target_batch[:, len].data if is_teacher else top1)\n #outPuts:[seqLen,B,target_vocab_size]——其实是从1开始存的,0时间步结果为0!\n return outPuts","sub_path":"pytorch_seq2seq_final/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":21565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"482333751","text":"checkerBoard = [1, -1, 1]\n\n# find matrix of minors for first row of values\ndef getMatrixOfMinor(inputMatrix):\n minorMatrix = [[0 for x in range(2)], [0 for x in range(2)]]\n matrixOfMinor = []\n minorValues = []\n for j in range(3):\n for rows in range(1,3):\n for columns in range(3):\n if(columns == j):\n continue\n minorValues.append(inputMatrix[rows][columns])\n minorMatrix[0] = minorValues[0:2]\n minorMatrix[1] = minorValues[2:4]\n minorValues = []\n matrixOfMinor.append(minorMatrix[:])\n return matrixOfMinor\n\n# returns a determinant value of 2 by 2 matrix\ndef getDeterminant(input):\n determinant = (input[0][0] * input [1][1]) - (input[0][1] * input[1][0])\n return determinant\n\n# returns the determinant of 3 by 3 matrix by calculating determinant for all the minors corresponding to 1st row\ndef calculateDeterminant(inputMatrix, matrixOfMinor):\n solution = 0\n for column in range(3):\n coeff = inputMatrix[0][column]\n coeff = coeff * checkerBoard[column]\n solution += coeff * getDeterminant(matrixOfMinor[column])\n return solution\n\n# returns boolean value based on whether given determinant is singular or non-singular\ndef isSingular(determinant):\n print(\"determinant: \" + 
str(determinant))\n if(determinant == 0):\n print(\"Given matrix is singular\")\n return True\n else:\n print(\"Given matrix is non-singular\")\n return False\n\n# Reads the file and creates matrix based on the data inside file\ndef getInputFromFile(filename):\n with open(filename, \"r\") as data:\n values = data.readlines()\n valueList = []\n for value in values:\n value = value.strip()\n value = value.split(',')\n value = [float(x) for x in value]\n valueList.append(value[:])\n return valueList\n\n# A function that takes matrix as input and returns its determinant\ndef callFunctions(inputMatrix):\n matrixOfMinor = getMatrixOfMinor(inputMatrix)\n determinant = calculateDeterminant(inputMatrix, matrixOfMinor)\n return determinant\n\n#instructions\ninputFromFile = getInputFromFile(\"input.txt\")\nprint(\"MATRIX IN input.txt\")\nprint(inputFromFile)\nmatrixOfMinor = getMatrixOfMinor(inputFromFile)\ndeterminant = calculateDeterminant(inputFromFile, matrixOfMinor)\nisSingular(determinant)\n","sub_path":"Atharva Jadhav 20030142009/invertible.py","file_name":"invertible.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"338907222","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : JaeZheng\n# @Time : 2019/11/21 9:44\n# @File : torch_nn.py\n\nimport torch\nimport time\n\ndtype = torch.float\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\") # 取消注释以在GPU上运行\n\nstart = time.time()\n# N是批量大小; D_in是输入维度;\n# 49/5000 H是隐藏的维度; D_out是输出维度。\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# 创建随机输入和输出数据\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\n# 随机初始化权重\nw1 = torch.randn(D_in, H, device=device, dtype=dtype)\nw2 = torch.randn(H, D_out, device=device, dtype=dtype)\n\nlearning_rate = 1e-6\nfor t in range(500):\n # forward\n h = x.mm(w1)\n h_relu = h.clamp(min=0)\n y_pred = h_relu.mm(w2)\n\n # 计算和打印损失\n loss = (y_pred - y).pow(2).sum().item()\n print(t, loss)\n\n # Backprop计算w1和w2相对于损耗的梯度\n grad_y_pred = 2.0*(y_pred-y)\n grad_w2 = h_relu.t().mm(grad_y_pred)\n grad_h_relu = grad_y_pred.mm(w2.t())\n grad_h = grad_h_relu.clone()\n grad_h[h<0]=0\n grad_w1 = x.t().mm(grad_h)\n\n # 使用梯度下降更新权重\n w1 -= learning_rate * grad_w1\n w2 -= learning_rate * grad_w2\n\nend = time.time()\nprint('Running Time: {}s'.format((end-start)))\n# cpu time: 0.20948123931884766s\n# gpu time: 3.6765434741973877s","sub_path":"chapter3/torch_nn.py","file_name":"torch_nn.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"343435330","text":"from Classification.Experiment.MultipleRun import MultipleRun\nfrom Classification.Experiment.Experiment import Experiment\nfrom Classification.Performance.ExperimentPerformance import ExperimentPerformance\nfrom Sampling.Bootstrap import Bootstrap\nfrom Classification.InstanceList.InstanceList import InstanceList\n\n\nclass BootstrapRun(MultipleRun):\n\n __numberOfBootstraps: int\n\n def __init__(self, numberOfBootstraps: int):\n \"\"\"\n Constructor for BootstrapRun class. 
Basically sets the number of bootstrap runs.\n\n PARAMETERS\n ----------\n numberOfBootstraps : int\n Number of bootstrap runs.\n \"\"\"\n self.__numberOfBootstraps = numberOfBootstraps\n\n def execute(self, experiment: Experiment) -> ExperimentPerformance:\n \"\"\"\n Execute the bootstrap run with the given classifier on the given data set using the given parameters.\n\n PARAMETERS\n ----------\n experiment : Experiment\n Experiment to be run.\n\n RETURNS\n -------\n ExperimentPerformance\n An ExperimentPerformance instance.\n \"\"\"\n result = ExperimentPerformance()\n for i in range(self.__numberOfBootstraps):\n bootstrap = Bootstrap(experiment.getDataSet().getInstances(), i + experiment.getParameter().getSeed())\n bootstrapSample = InstanceList(bootstrap.getSample())\n experiment.getClassifier().train(bootstrapSample, experiment.getParameter())\n result.add(experiment.getClassifier().test(experiment.getDataSet().getInstanceList()))\n return result\n","sub_path":"Classification/Experiment/BootstrapRun.py","file_name":"BootstrapRun.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"224488226","text":"import pytz\nfrom datetime import datetime\nfrom .models import ActivationKey\n\n# Copyright Videntity Systems Inc.\n\n\ndef validate_activation_key(activation_key):\n utc = pytz.UTC\n try:\n vc = ActivationKey.objects.get(key=activation_key)\n now = datetime.now().replace(tzinfo=utc)\n expires = vc.expires.replace(tzinfo=utc)\n\n if expires < now:\n vc.delete()\n # The key has expired\n return False\n except(ActivationKey.DoesNotExist):\n # The key does not exist\n return False\n # The key exists and has not expired.\n vc.user.is_active = True\n vc.user.save()\n vc.delete()\n return True\n","sub_path":"apps/accounts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"326083253","text":"import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom sklearn.model_selection import train_test_split\nimport training\nfrom tensorflow.keras.constraints import max_norm\n\n\n\nx,y = training.create_training_data() #load the data created using load\nx = x/255.0 #normalize the image data by scaling from 0-255 to 0-1\n\n#split the dataset into training and testing dataset\nxTrain, xTest, yTrain, yTest= train_test_split(x,y, test_size=0.01, random_state=0)\n\n\nmodel = Sequential()\nmodel.add(Conv2D(256, (3,3), input_shape=(100,100,1), padding='same', kernel_constraint=max_norm(3))) #1st hiddden layer\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Conv2D(256, (3,3),padding='same', kernel_constraint=max_norm(3))) #2nd hidden layer\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2))) \n\nmodel.add(Flatten()) #convert 3d output vector to 1d vector\n\nmodel.add(Dense(64, activation='relu')) #final output layer\nmodel.add(Dense(1, activation='sigmoid'))\n\n#compile the model\nmodel.compile(loss='binary_crossentropy',optimizer = 'adam', metrics=['accuracy'])\n\n\n#train the data \nmodel.fit(xTrain, yTrain, epochs=10, batch_size=25, verbose=1)\n\t\n#evaluate the model on the test dataset\n_,accuracy= model.evaluate(xTest,yTest)\nprint('Accuracy: ')\nprint(accuracy*100) \n \n#save the created model and weights to a h5 
file\nmodel.save('cvd.h5')\n\n\n\n\n","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"571922801","text":"from collections import deque\n\n\ndef play_game(players, last_points):\n circle = deque([0])\n scores = [0] * players\n for i in range(1, last_points * 100 + 1):\n if i % 23 != 0:\n circle.rotate(-1)\n circle.append(i)\n else:\n circle.rotate(7)\n scores[i % players] += i + circle.pop()\n circle.rotate(-1)\n return max(scores)\n\n\nif __name__ == \"__main__\":\n with open('day09/input.txt') as inp:\n game_data = tuple(int(item) for i, item in enumerate(inp.read().split(' ')) if i in (0, 6))\n\n print(play_game(*game_data)) # 3212830280\n\n","sub_path":"2018/day09/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"18815271","text":"def spellCheck(textFileName): \n \"\"\"\n >>> spellCheck(\"test1.txt\")\n {'exercsie': 1, 'finised': 1}\n >>> spellCheck(\"test2.txt\")\n {'bechause': 1, 'c++': 1}\n >>> spellCheck(\"test3.txt\")\n {'lissard': 1, 'chamelon': 1, 'gerbal': 2, 'hampster': 3, 'tortise': 1}\n >>> type(spellCheck(\"test1.txt\"))\n \n \"\"\"\n # Use the open method to open the words file.\n file = open(\"words.txt\", \"r\")\n # Read the list of words into a list named wordsList\n words = file.readlines()\n wordsList =(words)\n # Close the file\n file.close()\n # Open the file whos name was provided as the textFileName variable\n file = open(textFileName, \"r\")\n # Read the text from the file into a list called wordsToCheck\n check = file.readlines()\n wordsToCheck = (check)\n # Close the file\n file.close()\n\n # You do not need to change these next lines they ensure that the return characters are removed from the words lists\n for i in range(0,len(wordsList)): wordsList[i]=wordsList[i].replace(\"\\n\",\"\")\n for i in range(0,len(wordsToCheck)): wordsToCheck[i]=wordsToCheck[i].replace(\"\\n\",\"\")\n\n\n # The next line creates the dictionary that we will use\n # This dictionary will have the word that has been spelt wrong as the key and the number of times it has been spelt wrong as the value\n spellingErrors= {}\n # Loop through the wordsToCheck list\n for s in wordsToCheck:\n # Change the current word into lower case\n s = s.lower()\n # If the current word does not exist in the wordsList then\n if s not in wordsList:\n # Check if the word already exists in the spellingErrors dictionary\n if s in spellingErrors:\n # If it does not exist than add it to the dictionary with the initial value of 1.\n spellingErrors[s] += 1\n else:\n # If it does exist in the dictionary then increase the value by 1\n spellingErrors[s] = 1\n\n # Return the dictionary\n return spellingErrors\n\n\n","sub_path":"SpellCheck.py","file_name":"SpellCheck.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"529182806","text":"import nltk\n\nclass Analyzer():\n \"\"\"Implements sentiment analysis.\"\"\"\n\n def __init__(self, positives, negatives):\n \"\"\"Initialize Analyzer.\"\"\"\n self.positives = [x.strip() for x in open(positives,'r').readlines() \n if x.startswith(\";\") == False and x.strip() != \"\"]\n self.negatives = [x.strip() for x in open(negatives,'r').readlines() \n if x.startswith(\";\") == False and x.strip() != \"\"]\n\n def 
analyze(self, text):\n        \"\"\"Analyze text for sentiment, returning its score.\"\"\"\n        \n        # instantiate tokenizer\n        tokenizer = nltk.tokenize.TweetTokenizer()\n        \n        # tokenize text and loop through, updating score per sentiment\n        tokens = tokenizer.tokenize(text)\n        score = 0\n        for word in tokens:\n            if word.lower() in self.positives:\n                score += 1\n            elif word.lower() in self.negatives:\n                score -= 1\n        return score\n\n","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"641275988","text":"#Reads in file path as input and returns preprocessed dataframe\r\nimport pandas as pd\r\n\r\n\r\nclass Reader:\r\n    \"\"\"Creates a dataframe containing the dataset, with changes for each dataset depending on what they need\"\"\"\r\n\r\n    def __init__(self, file_path):\r\n        \"\"\"Depending on the dataset chosen, performs dataset-specific preprocessing before returning\r\n        the edited dataset to be used by our model\"\"\"\r\n\r\n        if \"abalone\" in file_path:\r\n            self.df = pd.read_csv(file_path, header=None)\r\n\r\n        elif \"glass\" in file_path:\r\n            self.df = pd.read_csv(file_path, header=None)\r\n            self.df.pop(0)\r\n        elif \"house\" in file_path:\r\n            #Moves the classification column to the end of the dataframe;\r\n            #the question-mark-to-no-vote replacement below is currently disabled\r\n\r\n            self.df = pd.read_csv(file_path, header=None)\r\n            last_col_index = self.df.columns[-1]\r\n            #self.df.replace(\"?\", \"n\", inplace=True)\r\n            temp_col = self.df.pop(0)\r\n            self.df[last_col_index+1] = temp_col\r\n        elif \"machine\" in file_path:\r\n            #Removes the last column in the dataframe\r\n            self.df = pd.read_csv(file_path, header=None)\r\n            last_col_index = self.df.columns[-1]\r\n            self.df.pop(last_col_index)\r\n        elif \"segmentation\" in file_path:\r\n            #Moves the classification column to the end\r\n            self.df = pd.read_csv(file_path)\r\n            temp_col = self.df.pop(\"CLASS\")\r\n            self.df[\"CLASS\"] = temp_col\r\n        else:\r\n            self.df = pd.read_csv(file_path)\r\n        self.df.columns = [*self.df.columns[:-1], 'Class'] #Renames the last column to 'Class'\r\n\r\n","sub_path":"proj2/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"19538126","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 19 14:06:25 2020\n\n@author: jingwui\n\"\"\"\n\nfrom opentrons import protocol_api\n\n# metadata\nmetadata = {\n    'protocolName': 'Transfer recombinant DNA from 24 Well plates to 96 Well-plates in different combinations',\n    'apiLevel': '2.0'\n}\n\n# protocol run function. 
the part after the colon lets your editor know\n# where to look for autocomplete suggestions\ndef run(protocol: protocol_api.ProtocolContext):\n    \n    Falcon = protocol.load_labware('opentrons_24_tuberack_eppendorf_1.5ml_safelock_snapcap',4)\n    plate = protocol.load_labware('corning_96_wellplate_360ul_flat', 1)\n    tiprack_1 = protocol.load_labware('opentrons_96_tiprack_20ul', 5)\n    tiprack_2 = protocol.load_labware('opentrons_96_tiprack_20ul', 6)\n    p10 = protocol.load_instrument('p10_single', 'right', tip_racks=[tiprack_1, tiprack_2])\n    n = 2 #refers to the number of variations of the same sample (i.e. A1, A2, ...)\n    i_A = 1\n    i_B = 1\n    i_C = 1\n    count_B = 0\n    count_C = 0\n    \n# For D1 (Assembly Master Mix)\n    p10.pick_up_tip()\n    for r in range(2*n):\n        p10.distribute(2, Falcon['D1'], plate.columns()[r][0:n], new_tip='never',disposal_volume = 0)\n    p10.drop_tip()\n    \n# For D2 (Backbone+pLac)\n    p10.pick_up_tip()\n    for r in range(n*2):\n        p10.distribute(2, Falcon['D2'], plate.columns()[r][0:n], new_tip='never',disposal_volume = 0)\n    p10.drop_tip()\n    \n    while i_A<(n+1):\n        p10.pick_up_tip()\n        for r in range(n):\n            if 1 < i_A <= 2:\n                p10.distribute(2, Falcon[\"A\"+ str(i_A)], plate.rows()[r][n:2*n], \n                               new_tip = 'never')\n            else:\n                p10.distribute(2, Falcon[\"A\"+ str(i_A)], plate.rows()[r][0:n], \n                               new_tip = 'never')\n        p10.drop_tip()\n        \n        i_A += 1\n        \n    while i_B<(n+1): \n        p10.distribute(2, Falcon['B' + str(i_B)], plate.rows()[count_B][0:n], \n                       new_tip = 'always')\n        p10.distribute(2, Falcon['B' + str(i_B)], plate.rows()[count_B][n:n*2], \n                       new_tip = 'always')\n        i_B += 1\n        count_B +=1\n        \n    while i_C<(n+1):\n        p10.transfer(2, Falcon['C' + str(i_C)], plate.columns()[count_C][0:n], \n                     new_tip = 'always', blow_out = True)\n        p10.transfer(2, Falcon['C' + str(i_C)], plate.columns()[count_C + n][0:n], \n                     new_tip = 'always', blow_out = True)\n\n        \n        i_C+= 1\n        count_C +=1\n    ","sub_path":"Opentrons/Protocol_transfer_for_2_Samples_8_combinations_for_96_well_plate.py","file_name":"Protocol_transfer_for_2_Samples_8_combinations_for_96_well_plate.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458361462","text":"'''\n    Module for creating node IDs and parsing XML\n'''\nimport uuid\nimport os\n\nfrom lxml import etree as ET\n\nfrom .oval_node import OvalNode\n\nns = {\n    'XMLSchema': 'http://oval.mitre.org/XMLSchema/oval-results-5',\n    'xccdf': 'http://checklists.nist.gov/xccdf/1.2',\n    'arf': 'http://scap.nist.gov/schema/asset-reporting-format/1.1',\n    'oval-definitions': 'http://oval.mitre.org/XMLSchema/oval-definitions-5',\n    'scap': 'http://scap.nist.gov/schema/scap/source/1.2',\n}\n\n\nclass XmlParser():\n    def __init__(self, src):\n        self.src = src\n        self.tree = ET.parse(self.src)\n        self.root = self.tree.getroot()\n        if not self.validate(\n                'schemas/arf/1.1/asset-reporting-format_1.1.0.xsd'):\n            raise ValueError(\"err- This is not an ARF report file.\")\n        self.used_rules = self._get_used_rules()\n        self.notselected_rules = self._get_notselected_rules()\n        self.scan_definitions = self._get_scan()\n\n    def get_src(self, src):\n        _dir = os.path.dirname(os.path.realpath(__file__))\n        FIXTURE_DIR = os.path.join(_dir, src)\n        return str(FIXTURE_DIR)\n\n    def validate(self, xsd_path):\n        xsd_path = self.get_src(xsd_path)\n        xmlschema_doc = ET.parse(xsd_path)\n        xmlschema = ET.XMLSchema(xmlschema_doc)\n\n        xml_doc = self.tree\n        result = xmlschema.validate(xml_doc)\n\n        return result\n\n    def get_data(self, href):\n        report_data = None\n        reports = 
self.root.find('.//arf:reports', ns)\n        for report in reports:\n            if \"#\" + str(report.get(\"id\")) == href:\n                report_data = report\n\n        trees_data = report_data.find(\n            ('.//XMLSchema:oval_results/XMLSchema:results/'\n             'XMLSchema:system/XMLSchema:definitions'), ns)\n        return trees_data\n\n    def _get_used_rules(self):\n        rulesResults = self.root.findall(\n            './/xccdf:TestResult/xccdf:rule-result', ns)\n        rules = []\n        for ruleResult in rulesResults:\n            result = ruleResult.find('.//xccdf:result', ns)\n            if result.text != \"notselected\":\n                check_content_ref = ruleResult.find(\n                    './/xccdf:check/xccdf:check-content-ref', ns)\n                if check_content_ref is not None:\n                    rules.append(dict(\n                        id_rule=ruleResult.get('idref'),\n                        id_def=check_content_ref.attrib.get('name'),\n                        href=check_content_ref.attrib.get('href'),\n                        result=result.text,\n                    ))\n        return rules\n\n    def _get_notselected_rules(self):\n        rulesResults = self.root.findall(\n            './/xccdf:TestResult/xccdf:rule-result', ns)\n        rules = []\n        for ruleResult in rulesResults:\n            result = ruleResult.find('.//xccdf:result', ns)\n            if result.text == \"notselected\":\n                rules.append(dict(id_rule=ruleResult.get('idref')))\n        return rules\n\n    def _get_scan(self):\n        scan = dict(definitions=[])\n        for i in self.get_data(self.used_rules[0]['href']):\n            scan['definitions'].append(self.build_graph(i))\n        self.insert_comments(scan)\n        return self._fill_extend_definition(scan)\n\n    def parse_data_to_dict(self, rule_id):\n        for definition in self.scan_definitions['definitions']:\n            if self.get_def_id_by_rule_id(rule_id) == definition['id']:\n                return dict(rule_id=rule_id, definition=definition)\n\n    def _xml_dict_to_node(self, dict_of_definition):\n        children = []\n        for child in dict_of_definition['node']:\n            if 'operator' in child:\n                children.append(self._xml_dict_to_node(child))\n            else:\n                children.append(\n                    OvalNode(\n                        child['value_id'],\n                        'value',\n                        child['value'],\n                        child['negate'],\n                        child['comment'],\n                        child['tag']\n                    ))\n\n        if 'id' in dict_of_definition:\n            children[0].node_id = dict_of_definition['id']\n            return children[0]\n        else:\n            return OvalNode(\n                str(uuid.uuid4()),\n                'operator',\n                dict_of_definition['operator'],\n                dict_of_definition['negate'],\n                dict_of_definition['comment'],\n                dict_of_definition['tag'],\n                children,\n            )\n\n    def get_def_id_by_rule_id(self, rule_id):\n        for rule in self.notselected_rules:\n            if rule['id_rule'] == rule_id:\n                raise ValueError(\n                    'err- rule \"{}\" was not selected, so there are no results.'\n                    .format(rule_id))\n        for rule in self.used_rules:\n            if rule['id_rule'] == rule_id:\n                return rule['id_def']\n        raise ValueError('err- 404 rule not found!')\n\n    def get_rule_dict(self, rule_id):\n        return self.parse_data_to_dict(rule_id)\n\n    def xml_dict_of_rule_to_node(self, rule):\n        dict_of_definition = rule['definition']\n        return OvalNode(\n            rule['rule_id'],\n            'operator',\n            'and',\n            False,\n            dict_of_definition['comment'],\n            \"Rule\",\n            [self._xml_dict_to_node(dict_of_definition)],\n        )\n\n    def get_oval_tree(self, rule_id=None):\n        return self.xml_dict_of_rule_to_node(self.parse_data_to_dict(rule_id))\n\n    def build_graph(self, tree_data):\n        graph = dict(\n            id=tree_data.get('definition_id'),\n            node=[],\n        )\n        for tree in tree_data:\n            negate_status = False\n            if tree.get('negate') is not None:\n                negate_status = self._str_to_bool(tree.get('negate'))\n            graph['negate'] = negate_status\n            graph['node'].append(self._build_node(tree, \"Definition\"))\n        return graph\n\n    def _str_to_bool(self, s):\n        if s == 'true':\n            return True\n        elif s == 'false':\n            return False\n        else:\n            raise 
ValueError('err- negation is not bool')\n\n def _build_node(self, tree, tag):\n negate_status = False\n if tree.get('negate') is not None:\n negate_status = self._str_to_bool(tree.get('negate'))\n\n node = dict(\n operator=tree.get('operator'),\n negate=negate_status,\n result=tree.get('result'),\n comment=None,\n tag=tag,\n node=[],\n )\n for child in tree:\n if child.get('operator') is not None:\n node['node'].append(self._build_node(child, \"Criteria\"))\n else:\n negate_status = False\n if child.get('negate') is not None:\n negate_status = self._str_to_bool(child.get('negate'))\n\n if child.get('definition_ref') is not None:\n node['node'].append(\n dict(\n extend_definition=child.get('definition_ref'),\n result=child.get('result'),\n negate=negate_status,\n comment=None,\n tag=\"Extend definition\",\n ))\n else:\n node['node'].append(\n dict(\n value_id=child.get('test_ref'),\n value=child.get('result'),\n negate=negate_status,\n comment=None,\n tag=\"Test\",\n ))\n return node\n\n def _fill_extend_definition(self, scan):\n out = dict(definitions=[])\n for definition in scan['definitions']:\n nodes = []\n for value in definition['node']:\n nodes.append(self._operator_as_child(value, scan))\n out['definitions'].append(\n dict(\n id=definition['id'],\n comment=definition['comment'],\n node=nodes,\n ))\n return out\n\n def _operator_as_child(self, value, scan):\n out = dict(\n operator=value['operator'],\n negate=value['negate'],\n result=value['result'],\n comment=value['comment'],\n tag=value['tag'],\n node=[],\n )\n for child in value['node']:\n if 'operator' in child:\n out['node'].append(self._operator_as_child(child, scan))\n elif 'extend_definition' in child:\n out['node'].append(\n self._find_definition_by_id(\n scan,\n child['extend_definition'],\n child['negate'],\n child['comment'],\n child['tag'],\n ))\n elif 'value_id' in child:\n out['node'].append(child)\n else:\n raise ValueError('error - unknown child')\n return out\n\n def _find_definition_by_id(self, scan, id, negate_status, comment, tag):\n for definition in scan['definitions']:\n if definition['id'] == id:\n definition['node'][0]['negate'] = negate_status\n definition['node'][0]['comment'] = comment\n definition['node'][0]['tag'] = tag\n return self._operator_as_child(definition['node'][0], scan)\n\n def create_dict_form_criteria(self, criteria, description):\n comments = dict(\n operator='AND' if criteria.get('operator') is None else criteria.get('operator'),\n comment=description if criteria.get('comment') is None else criteria.get('comment'),\n node=[],\n )\n for criterion in criteria:\n if criterion.get('operator'):\n comments['node'].append(\n self.create_dict_form_criteria(criterion, None))\n else:\n if criterion.get('definition_ref'):\n comments['node'].append(\n dict(\n extend_definition=criterion.get('definition_ref'),\n comment=criterion.get('comment'),\n ))\n else:\n comments['node'].append(\n dict(\n value_id=criterion.get('test_ref'),\n comment=criterion.get('comment'),\n ))\n return comments\n\n def _prepare_definition_comments(self):\n oval_definitions = self.root.find(\n './/arf:report-requests/arf:report-request/'\n 'arf:content/scap:data-stream-collection/'\n 'scap:component/oval-definitions:oval_definitions/'\n 'oval-definitions:definitions', ns)\n definitions = []\n for definition in oval_definitions:\n comment_definition = dict(\n id=definition.get('id'), comment=None, node=[])\n title = definition.find(\n './/oval-definitions:metadata/oval-definitions:title', ns)\n description = 
definition.find(\n                './/oval-definitions:metadata/oval-definitions:description', ns)\n            comment_definition['comment'] = title.text\n            criteria = definition.find('.//oval-definitions:criteria', ns)\n            comment_definition['node'].append(\n                self.create_dict_form_criteria(criteria, description.text))\n            definitions.append(comment_definition)\n        return definitions\n\n    def recursive_help_fill_comments(self, comments, nodes):\n        out = nodes\n        out['comment'] = comments['comment']\n        for node, comment in zip(out['node'], comments['node']):\n            node['comment'] = comment['comment']\n            if 'operator' in node:\n                self.recursive_help_fill_comments(comment, node)\n\n    def fill_comment(self, comment_definition, data_definition):\n        comments = comment_definition['node'][0]\n        nodes = data_definition['node'][0]\n        data_definition['comment'] = comment_definition['comment']\n        self.recursive_help_fill_comments(comments, nodes)\n\n    def insert_comments(self, data):\n        comment_definitions = self._prepare_definition_comments()\n        for data_definition in data['definitions']:\n            for comment_definition in comment_definitions:\n                if comment_definition['id'] == data_definition['id']:\n                    self.fill_comment(comment_definition, data_definition)\n","sub_path":"oval_graph/xml_parser.py","file_name":"xml_parser.py","file_ext":"py","file_size_in_byte":12712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"94653378","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.core.files.storage import FileSystemStorage\nfrom .models import Newsletter\nfrom news.models import News\nfrom cat.models import Cat\nfrom subcat.models import SubCat\nfrom trending.models import Trending\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User, Group, Permission\nimport random\nfrom random import randint\nfrom passlib.hash import pbkdf2_sha256\nfrom django.contrib.auth.hashers import make_password, check_password\nfrom manager.models import Manager\n\n\ndef news_letter(request):\n\n    if request.method == 'POST':\n        txt = request.POST.get('newslettername')\n\n        # anything containing an '@' is treated as an email address\n        if txt.find('@') != -1:\n            b = Newsletter(txt=txt, status=1)\n            b.save()\n        # otherwise it must be a 10-digit phone number\n        elif len(txt) == 10 and txt.isdigit():\n            b = Newsletter(txt=txt, status=2)\n            b.save()\n        else:\n            msg = \"Number should be 10 Digit\"\n            return render(request, 'front/msg.html', {'msg': msg})\n\n    return redirect('home')\n\n\ndef news_email(request):\n\n    perm = 0\n    for i in request.user.groups.all():\n        if i.name == 'mastername': perm = 1\n\n    if not request.user.is_authenticated:\n        return redirect(\"mylogin\")\n\n    # status=1 marks email subscriptions\n    email = Newsletter.objects.filter(status=1)\n\n    return render(request, 'back/email.html', {'email': email})\n\ndef news_phone(request):\n\n    perm = 0\n    for i in request.user.groups.all():\n        if i.name == 'mastername': perm = 1\n\n    if not request.user.is_authenticated:\n        return redirect(\"mylogin\")\n\n    # status=2 marks phone-number subscriptions\n    phone = Newsletter.objects.filter(status=2)\n\n    return render(request, 'back/phone.html', {'phone': phone})\n\n\ndef news_del(request, pk, num):\n\n    perm = 0\n    for i in request.user.groups.all():\n        if i.name == 'mastername': perm = 1\n\n    if not request.user.is_authenticated:\n        return redirect(\"mylogin\")\n\n    b = Newsletter.objects.get(pk=pk)\n    b.delete()\n\n    if int(num) == 2:\n        return redirect('news_phone')\n\n    return 
redirect('news_email')","sub_path":"newsletter/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"401975924","text":"import os\nimport random\nimport json\nimport string\nimport opencc\nimport pandas as pd\nfrom itertools import product\nfrom collections import Counter\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nfrom bert import tokenization\nfrom helper import DataProcessor, InputExample, file_based_convert_examples_to_features\nfrom datasets import data_loader\nfrom composed_dataset import ComposedABSADataset\n\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n\nflags = tf.flags\nFLAGS = flags.FLAGS\nFILE_HOME = os.path.abspath(os.path.dirname(__file__))\n\nflags.DEFINE_string(\n    \"vocab_file\", None,\n    \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_bool(\n    \"do_lower_case\", True,\n    \"Whether to lower case the input text. Should be True for uncased \"\n    \"models and False for cased models.\")\n\nflags.DEFINE_string(\n    \"dataset\", \"camera\",\n    \"dataset: camera | car | notebook | phone | all\")\n\nflags.DEFINE_string(\n    \"output_dir\", os.path.join(FILE_HOME, \"./output\"),\n    \"The output directory where the model checkpoints will be written.\")\n\nflags.DEFINE_integer(\n    \"max_seq_length\", 128,\n    \"The maximum total input sequence length after WordPiece tokenization. \"\n    \"Sequences longer than this will be truncated, and sequences shorter \"\n    \"than this will be padded.\")\n\n\ndef dump_to_tfrecords():\n    if not tf.io.gfile.exists(os.path.join(FLAGS.output_dir, \"tfrecords\")):\n        tf.io.gfile.makedirs(os.path.join(FLAGS.output_dir, \"tfrecords\"))\n\n    cc = opencc.OpenCC('t2s')\n    ds = ComposedABSADataset(FLAGS.dataset)\n    all_labels = ds.get_labels()\n\n    data_processor = DataProcessor(\n        vocab_file=FLAGS.vocab_file,\n        do_lower_case=FLAGS.do_lower_case,\n        label_list=all_labels,\n        max_seq_length=FLAGS.max_seq_length)\n    data_processor.dump_to_file(os.path.join(FLAGS.output_dir, \"preprocessor.json\"))\n\n    def create_examples(in_datas):\n        examples = []\n        for item in in_datas:\n            examples.append(\n                InputExample(text_a=item['text'], \n                             text_b=item['aspect'], \n                             has_positive=item['has_positive'], \n                             has_negative=item['has_negative']))\n        return examples\n    \n    train_examples = create_examples(ds.train_data)\n    dev_examples = create_examples(ds.test_data)\n\n    random.shuffle(train_examples)\n    train_file = os.path.join(FLAGS.output_dir, \"tfrecords/train.tf_record\")\n    file_based_convert_examples_to_features(train_examples, data_processor, train_file)\n\n    dev_file = os.path.join(FLAGS.output_dir, \"tfrecords/dev.tf_record\")\n    file_based_convert_examples_to_features(dev_examples, data_processor, dev_file)\n\n\nif __name__ == \"__main__\":\n    dump_to_tfrecords()\n","sub_path":"models/bert_spc_multilabel/data_prepare.py","file_name":"data_prepare.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"377239613","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/11/26 19:03\n# @Author : panxiaotong\n# @Description : extract samples from SimLex-999\n\nimport sys\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 3:\n        print(\"extract_from_SL999 <input_file> <output_file>\")\n        sys.exit()\n\n    output_file = open(sys.argv[2], 'w')\n    with open(sys.argv[1], 'r') as f:\n        for idx, line in enumerate(f):\n            if idx == 0:\n                continue\n            elements = 
line.strip('\\r\\n').split('\\t')\n            word_1 = elements[0]\n            word_2 = elements[1]\n            score = elements[3]\n            output_file.write(word_1 + \"\\t\" + word_2 + \"\\t\" + score + \"\\n\")\n    f.close()\n    output_file.close()","sub_path":"src/data/eval_data/extract_from_SL999.py","file_name":"extract_from_SL999.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"508277567","text":"import logging\nimport os\n\nfrom exceptions.BotsicoteException import BotsicoteException\n\n\ndef expand_var_and_user(path):\n    return os.path.expanduser(os.path.expandvars(path))\n\n\ndef check_fields_in_dict(dictionary, fields, dictionary_name):\n    \"\"\"\n    Check that the fields are in the dict and raise an exception if not\n\n    :param dictionary: The dictionary in which the check has to be done\n    :type dictionary: dict\n    :param fields: List of fields to check\n    :type fields: list[str]\n    :param dictionary_name: name of the dictionary (for exception message purposes)\n    :type dictionary_name: str\n    :return: True if all the fields are in the given dictionary; raises an exception otherwise\n    \"\"\"\n    for field in fields:\n        if field not in dictionary:\n            raise BotsicoteException(\"%s field(s) required but not found in %s: %s\"\n                                     % (\", \".join(fields), dictionary_name, str(dictionary)))\n    return True\n\n\ndef format_raw_data(raw_data, time_step, result_key):\n    \"\"\"\n    Format raw data\n\n    :param raw_data: The raw data\n    :type raw_data: dict\n    :param time_step: The time between each record\n    :type time_step: int\n    :param result_key: Key to access data in raw_data dict\n    :type result_key: str\n    :return: The formatted data list\n    :rtype: list\n    \"\"\"\n    formatted_data = []\n    check_fields_in_dict(raw_data, [\"result\"], \"Raw data\")\n    check_fields_in_dict(raw_data[\"result\"], [result_key], \"Raw data\")\n    for data in raw_data[\"result\"][result_key]:\n        if len(data) == 8:\n            formatted_data.append({\n                \"id\": int(data[0] / time_step),\n                \"time\": data[0],\n                \"open\": float(data[1]),\n                \"high\": float(data[2]),\n                \"low\": float(data[3]),\n                \"close\": float(data[4]),\n                \"vwap\": float(data[5]),\n                \"volume\": float(data[6]),\n                \"count\": data[7]\n            })\n        else:\n            logging.warning(\"Data should be composed of 8 fields: